/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"context"
	"fmt"
	"os"

	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
	"github.com/ceph/ceph-csi/pkg/util"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"
)

// NodeServer struct of ceph CSI driver with supported methods of CSI
// node server spec.
type NodeServer struct {
	*csicommon.DefaultNodeServer
	// A map storing all volumes with ongoing operations so that additional operations
	// for that same volume (as defined by VolumeID) return an Aborted error
	VolumeLocks *util.VolumeLocks
}
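
// getCredentialsForVolume selects the credentials used for mounting: admin
// credentials for dynamically provisioned volumes, user credentials taken
// from the node stage secrets for pre-provisioned ones.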
func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
	var (
		err     error
		cr      *util.Credentials
		secrets = req.GetSecrets()
	)

	if volOptions.ProvisionVolume {
		// The volume is provisioned dynamically, use passed in admin credentials
		cr, err = util.NewAdminCredentials(secrets)
		if err != nil {
			return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
		}
	} else {
		// The volume is pre-made, credentials are in node stage secrets
		cr, err = util.NewUserCredentials(req.GetSecrets())
		if err != nil {
			return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
		}
	}

	return cr, nil
}

// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	var (
		volOptions *volumeOptions
	)
	if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
		return nil, err
	}

	// Configuration

	stagingTargetPath := req.GetStagingTargetPath()
	volID := volumeID(req.GetVolumeId())
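
	// Serialize operations on this volume: if another operation for the same
	// VolumeID is already in progress, return an Aborted error.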
	if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
		klog.Infof(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
	}
	defer ns.VolumeLocks.Release(req.GetVolumeId())
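
	// Resolve the volume options: try the current VolumeID format first, then
	// fall back to static (pre-provisioned) volumes and finally to the
	// pre-1.0.0 volume context.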
	volOptions, _, err := newVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
	if err != nil {
		if _, ok := err.(ErrInvalidVolID); !ok {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// check for pre-provisioned volumes (plugin versions > 1.0.0)
		volOptions, _, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
		if err != nil {
			if _, ok := err.(ErrNonStaticVolume); !ok {
				return nil, status.Error(codes.Internal, err.Error())
			}

			// check for volumes from plugin versions <= 1.0.0
			volOptions, _, err = newVolumeOptionsFromVersion1Context(string(volID), req.GetVolumeContext(),
				req.GetSecrets())
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}
		}
	}

	// Check if the volume is already mounted
	isMnt, err := util.IsMountPoint(stagingTargetPath)

	if err != nil {
		klog.Errorf(util.Log(ctx, "stat failed: %v"), err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		klog.Infof(util.Log(ctx, "cephfs: volume %s is already mounted to %s, skipping"), volID, stagingTargetPath)
		return &csi.NodeStageVolumeResponse{}, nil
	}

	// It's not, mount now
	if err = ns.mount(ctx, volOptions, req); err != nil {
		return nil, err
	}

	klog.Infof(util.Log(ctx, "cephfs: successfully mounted volume %s to %s"), volID, stagingTargetPath)

	return &csi.NodeStageVolumeResponse{}, nil
}
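
// mount obtains credentials and a mounter for the volume, mounts it at the
// staging path, and records the mount in the volume mount cache.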
func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
	stagingTargetPath := req.GetStagingTargetPath()
	volID := volumeID(req.GetVolumeId())

	cr, err := getCredentialsForVolume(volOptions, req)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed to get ceph credentials for volume %s: %v"), volID, err)
		return status.Error(codes.Internal, err.Error())
	}
	defer cr.DeleteCredentials()

	m, err := newMounter(volOptions)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed to create mounter for volume %s: %v"), volID, err)
		return status.Error(codes.Internal, err.Error())
	}

	klog.V(4).Infof(util.Log(ctx, "cephfs: mounting volume %s with %s"), volID, m.name())

	if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
		klog.Errorf(util.Log(ctx, "failed to mount volume %s: %v"), volID, err)
		return status.Error(codes.Internal, err.Error())
	}

	if err := volumeMountCache.nodeStageVolume(ctx, req.GetVolumeId(), stagingTargetPath, volOptions.Mounter, req.GetSecrets()); err != nil {
		klog.Warningf(util.Log(ctx, "mount-cache: failed to stage volume %s %s: %v"), volID, stagingTargetPath, err)
	}

	return nil
}

// NodePublishVolume mounts the volume mounted to the staging path to the target
// path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	mountOptions := []string{"bind"}
	if err := util.ValidateNodePublishVolumeRequest(req); err != nil {
		return nil, err
	}

	targetPath := req.GetTargetPath()
	volID := req.GetVolumeId()

	if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
		klog.Infof(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer ns.VolumeLocks.Release(volID)

	if err := util.CreateMountPoint(targetPath); err != nil {
		klog.Errorf(util.Log(ctx, "failed to create mount point at %s: %v"), targetPath, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	volCap := req.GetVolumeCapability()

	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}
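
	// Merge any mount flags from the volume capability into the bind-mount
	// options, skipping duplicates.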
	if m := volCap.GetMount(); m != nil {
		hasOption := func(options []string, opt string) bool {
			for _, o := range options {
				if o == opt {
					return true
				}
			}
			return false
		}
		for _, f := range m.MountFlags {
			if !hasOption(mountOptions, f) {
				mountOptions = append(mountOptions, f)
			}
		}
	}

	// Check if the volume is already mounted
	isMnt, err := util.IsMountPoint(targetPath)

	if err != nil {
		klog.Errorf(util.Log(ctx, "stat failed: %v"), err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		klog.Infof(util.Log(ctx, "cephfs: volume %s is already bind-mounted to %s"), volID, targetPath)
		return &csi.NodePublishVolumeResponse{}, nil
	}

	// It's not, mount now
	if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
		klog.Errorf(util.Log(ctx, "failed to bind-mount volume %s: %v"), volID, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err = volumeMountCache.nodePublishVolume(ctx, volID, targetPath, req.GetReadonly()); err != nil {
		klog.Warningf(util.Log(ctx, "mount-cache: failed to publish volume %s %s: %v"), volID, targetPath, err)
	}

	klog.Infof(util.Log(ctx, "cephfs: successfully bind-mounted volume %s to %s"), volID, targetPath)

	// #nosec - allow anyone to write inside the target path
	err = os.Chmod(targetPath, 0777)
	if err != nil {
		klog.Errorf(util.Log(ctx, "failed to change targetpath permission for volume %s: %v"), volID, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.NodePublishVolumeResponse{}, nil
}

// NodeUnpublishVolume unmounts the volume from the target path
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	var err error
	if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil {
		return nil, err
	}

	volID := req.GetVolumeId()
	targetPath := req.GetTargetPath()

	if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
		klog.Infof(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer ns.VolumeLocks.Release(volID)

	if err = volumeMountCache.nodeUnPublishVolume(ctx, volID, targetPath); err != nil {
		klog.Warningf(util.Log(ctx, "mount-cache: failed to unpublish volume %s %s: %v"), volID, targetPath, err)
	}

	// Unmount the bind-mount
	if err = unmountVolume(ctx, targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
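
	// Remove the bind-mount target directory; a path that is already gone is
	// not treated as an error.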
	err = os.Remove(targetPath)
	if err != nil && !os.IsNotExist(err) {
		return nil, status.Error(codes.Internal, err.Error())
	}

	klog.Infof(util.Log(ctx, "cephfs: successfully unbound volume %s from %s"), req.GetVolumeId(), targetPath)

	return &csi.NodeUnpublishVolumeResponse{}, nil
}

// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	var err error
	if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
		return nil, err
	}

	volID := req.GetVolumeId()
	if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
		klog.Infof(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer ns.VolumeLocks.Release(volID)

	stagingTargetPath := req.GetStagingTargetPath()

	if err = volumeMountCache.nodeUnStageVolume(volID); err != nil {
		klog.Warningf(util.Log(ctx, "mount-cache: failed to unstage volume %s %s: %v"), volID, stagingTargetPath, err)
	}

	// Unmount the volume
	if err = unmountVolume(ctx, stagingTargetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	klog.Infof(util.Log(ctx, "cephfs: successfully unmounted volume %s from %s"), req.GetVolumeId(), stagingTargetPath)

	return &csi.NodeUnstageVolumeResponse{}, nil
}

// NodeGetCapabilities returns the supported capabilities of the node server
func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
					},
				},
			},
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
					},
				},
			},
		},
	}, nil
}