/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path"
	"strings"

	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
	"github.com/ceph/ceph-csi/internal/cephfs/mounter"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/log"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// NodeServer struct of ceph CSI driver with supported methods of CSI
// node server spec.
type NodeServer struct {
	*csicommon.DefaultNodeServer
	// A map storing all volumes with ongoing operations so that additional operations
	// for that same volume (as defined by VolumeID) return an Aborted error
	VolumeLocks *util.VolumeLocks

	kernelMountOptions string
	fuseMountOptions   string
}
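
// getCredentialsForVolume returns Ceph credentials for the given volume:
// admin credentials when the volume was provisioned dynamically, and user
// credentials from the node stage secrets for pre-provisioned volumes.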
func getCredentialsForVolume(
	volOptions *store.VolumeOptions,
	secrets map[string]string,
) (*util.Credentials, error) {
	var (
		err error
		cr  *util.Credentials
	)

	if volOptions.ProvisionVolume {
		// The volume is provisioned dynamically, use passed in admin credentials
		cr, err = util.NewAdminCredentials(secrets)
		if err != nil {
			return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %w", err)
		}
	} else {
		// The volume is pre-made, credentials are in node stage secrets
		cr, err = util.NewUserCredentials(secrets)
		if err != nil {
			return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %w", err)
		}
	}

	return cr, nil
}
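
// getVolumeOptions resolves volume options for volID, trying each supported
// source in turn: the volume journal for CSI-provisioned IDs, static volume
// parameters from the volume context, and finally a plain monitor-list
// volume context. Parse failures fall through to the next source; any other
// error is returned as an Internal gRPC status.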
func (ns *NodeServer) getVolumeOptions(
	ctx context.Context,
	volID fsutil.VolumeID,
	volContext,
	volSecrets map[string]string,
) (*store.VolumeOptions, error) {
	volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), volContext, volSecrets, "")
	if err != nil {
		if !errors.Is(err, cerrors.ErrInvalidVolID) {
			return nil, status.Error(codes.Internal, err.Error())
		}

		volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), volContext)
		if err != nil {
			if !errors.Is(err, cerrors.ErrNonStaticVolume) {
				return nil, status.Error(codes.Internal, err.Error())
			}

			volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), volContext, volSecrets)
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}
		}
	}

	return volOptions, nil
}
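
// validateSnapshotBackedVolCapability rejects any access mode other than the
// read-only ones, since snapshot-backed volumes cannot be written to.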
func validateSnapshotBackedVolCapability(volCap *csi.VolumeCapability) error {
	// Snapshot-backed volumes may be used with read-only volume access modes only.
	mode := volCap.AccessMode.Mode

	if mode != csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY &&
		mode != csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
		return status.Error(codes.InvalidArgument,
			"snapshot-backed volume supports only read-only access mode")
	}

	return nil
}

// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(
	ctx context.Context,
	req *csi.NodeStageVolumeRequest,
) (*csi.NodeStageVolumeResponse, error) {
	if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
		return nil, err
	}

	// Configuration
	stagingTargetPath := req.GetStagingTargetPath()
	volID := fsutil.VolumeID(req.GetVolumeId())

	if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
	}
	defer ns.VolumeLocks.Release(req.GetVolumeId())

	volOptions, err := ns.getVolumeOptions(ctx, volID, req.GetVolumeContext(), req.GetSecrets())
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer volOptions.Destroy()

	// Skip extracting NetNamespaceFilePath if the clusterID is empty.
	// In case of pre-provisioned volume the clusterID is not set in the
	// volume context.
	if volOptions.ClusterID != "" {
		volOptions.NetNamespaceFilePath, err = util.GetCephFSNetNamespaceFilePath(
			util.CsiConfigFile,
			volOptions.ClusterID)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	if volOptions.BackingSnapshot {
		if err = validateSnapshotBackedVolCapability(req.GetVolumeCapability()); err != nil {
			return nil, err
		}
	}

	mnt, err := mounter.New(volOptions)
	if err != nil {
		log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	if err = ns.tryRestoreFuseMountInNodeStage(ctx, mnt, stagingTargetPath); err != nil {
		return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
	}

	// Check if the volume is already mounted
	isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
	if err != nil {
		log.ErrorLog(ctx, "stat failed: %v", err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		log.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)

		return &csi.NodeStageVolumeResponse{}, nil
	}

	// It's not, mount now
	if err = ns.mount(
		ctx,
		mnt,
		volOptions,
		fsutil.VolumeID(req.GetVolumeId()),
		req.GetStagingTargetPath(),
		req.GetSecrets(),
		req.GetVolumeCapability(),
	); err != nil {
		return nil, err
	}

	log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
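
	// If the FUSE mounter was used, persist the staging parameters so that a
	// crashed ceph-fuse process can later be restored for this volume (see
	// tryRestoreFuseMountInNodeStage above).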
	if _, isFuse := mnt.(*mounter.FuseMounter); isFuse {
		// FUSE mount recovery needs NodeStageMountinfo records.

		if err = fsutil.WriteNodeStageMountinfo(volID, &fsutil.NodeStageMountinfo{
			VolumeCapability: req.GetVolumeCapability(),
			Secrets:          req.GetSecrets(),
		}); err != nil {
			log.ErrorLog(ctx, "cephfs: failed to write NodeStageMountinfo for volume %s: %v", volID, err)

			// Try to clean node stage mount.
			if unmountErr := mounter.UnmountAll(ctx, stagingTargetPath); unmountErr != nil {
				log.ErrorLog(ctx, "cephfs: failed to unmount %s in WriteNodeStageMountinfo clean up: %v",
					stagingTargetPath, unmountErr)
			}

			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	return &csi.NodeStageVolumeResponse{}, nil
}
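
// mount fetches credentials for the volume, applies driver-level and
// read-only mount options as needed, and mounts the volume at
// stagingTargetPath. For snapshot-backed volumes, the snapshot root is then
// bind-mounted over the staging path.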
func (ns *NodeServer) mount(
	ctx context.Context,
	mnt mounter.VolumeMounter,
	volOptions *store.VolumeOptions,
	volID fsutil.VolumeID,
	stagingTargetPath string,
	secrets map[string]string,
	volCap *csi.VolumeCapability,
) error {
	cr, err := getCredentialsForVolume(volOptions, secrets)
	if err != nil {
		log.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)

		return status.Error(codes.Internal, err.Error())
	}
	defer cr.DeleteCredentials()

	log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name())

	switch mnt.(type) {
	case *mounter.FuseMounter:
		volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, ns.fuseMountOptions)
	case *mounter.KernelMounter:
		volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, ns.kernelMountOptions)
	}

	const readOnly = "ro"

	if volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
		volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
		switch mnt.(type) {
		case *mounter.FuseMounter:
			if !csicommon.MountOptionContains(strings.Split(volOptions.FuseMountOptions, ","), readOnly) {
				volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, readOnly)
			}
		case *mounter.KernelMounter:
			if !csicommon.MountOptionContains(strings.Split(volOptions.KernelMountOptions, ","), readOnly) {
				volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, readOnly)
			}
		}
	}

	if err = mnt.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
		log.ErrorLog(ctx,
			"failed to mount volume %s: %v. Check dmesg logs if required.",
			volID,
			err)

		return status.Error(codes.Internal, err.Error())
	}
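
	// The volume is mounted at this point; roll back and unmount the staging
	// path if any of the remaining steps fail.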
	defer func() {
		if err == nil {
			return
		}

		unmountErr := mounter.UnmountAll(ctx, stagingTargetPath)
		if unmountErr != nil {
			log.ErrorLog(ctx, "failed to clean up mounts in rollback procedure: %v", unmountErr)
		}
	}()

	if volOptions.BackingSnapshot {
		// Assign to the outer err (no ":=") so the rollback defer above can
		// observe failures from the snapshot bind mount below.
		var snapshotRoot string
		snapshotRoot, err = getBackingSnapshotRoot(ctx, volOptions, stagingTargetPath)
		if err != nil {
			return err
		}

		absoluteSnapshotRoot := path.Join(stagingTargetPath, snapshotRoot)
		err = mounter.BindMount(
			ctx,
			absoluteSnapshotRoot,
			stagingTargetPath,
			true,
			[]string{"bind", "_netdev"},
		)
		if err != nil {
			log.ErrorLog(ctx,
				"failed to bind mount snapshot root %s: %v", absoluteSnapshotRoot, err)

			return status.Error(codes.Internal, err.Error())
		}
	}

	return nil
}
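
// getBackingSnapshotRoot returns the path of the snapshot root relative to
// stagingTargetPath. For provisioned volumes it is already recorded in the
// volume options; for pre-provisioned volumes the <root>/.snap directory is
// searched for an entry matching BackingSnapshotID.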
func getBackingSnapshotRoot(
	ctx context.Context,
	volOptions *store.VolumeOptions,
	stagingTargetPath string,
) (string, error) {
	if volOptions.ProvisionVolume {
		// Provisioned snapshot-backed volumes should have their BackingSnapshotRoot
		// already populated.
		return volOptions.BackingSnapshotRoot, nil
	}

	// Pre-provisioned snapshot-backed volumes are more involved:
	//
	// Snapshots created with `ceph fs subvolume snapshot create` have the
	// following snap directory name format inside <root path>/.snap:
	//
	//   _<snapshot>_<snapshot inode number>
	//
	// We don't know what <snapshot inode number> is, and so <root path>/.snap
	// needs to be traversed in order to determine the full snapshot directory name.
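	//
	// For example, a snapshot named "snap1" might appear as a directory named
	// "_snap1_1099511627776" (the inode number here is made up for
	// illustration).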

	snapshotsBase := path.Join(stagingTargetPath, ".snap")

	dir, err := os.Open(snapshotsBase)
	if err != nil {
		log.ErrorLog(ctx, "failed to open %s when searching for snapshot root: %v", snapshotsBase, err)

		return "", status.Error(codes.Internal, err.Error())
	}
	defer dir.Close()

	// Read the contents of <root path>/.snap directory into a string slice.

	contents, err := dir.Readdirnames(0)
	if err != nil {
		log.ErrorLog(ctx, "failed to read %s when searching for snapshot root: %v", snapshotsBase, err)

		return "", status.Error(codes.Internal, err.Error())
	}

	var (
		found           bool
		snapshotDirName string
	)

	// Look through the directory's contents and try to find the correct snapshot
	// dir name. The search must be exhaustive to catch possible ambiguous results.

	for i := range contents {
		if !strings.Contains(contents[i], volOptions.BackingSnapshotID) {
			continue
		}

		if !found {
			found = true
			snapshotDirName = contents[i]
		} else {
			return "", status.Errorf(codes.InvalidArgument, "ambiguous backingSnapshotID %s in %s",
				volOptions.BackingSnapshotID, snapshotsBase)
		}
	}

	if !found {
		return "", status.Errorf(codes.InvalidArgument, "no snapshot with backingSnapshotID %s found in %s",
			volOptions.BackingSnapshotID, snapshotsBase)
	}

	return path.Join(".snap", snapshotDirName), nil
}
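
// The node calls in this file are driven by the kubelet in a fixed order.
// A minimal sketch of the sequence for one volume (request fields elided):
//
//	ns.NodeStageVolume(ctx, stageReq)     // mount CephFS at the staging path
//	ns.NodePublishVolume(ctx, publishReq) // bind-mount staging path into the pod
//	ns.NodeUnpublishVolume(ctx, unpubReq) // remove the bind mount
//	ns.NodeUnstageVolume(ctx, unstageReq) // unmount the staging path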

// NodePublishVolume mounts the volume mounted to the staging path to the target
// path.
func (ns *NodeServer) NodePublishVolume(
	ctx context.Context,
	req *csi.NodePublishVolumeRequest,
) (*csi.NodePublishVolumeResponse, error) {
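	// Default bind-mount options; "_netdev" marks the mount as network-backed,
	// which influences unmount ordering at system shutdown.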
	mountOptions := []string{"bind", "_netdev"}
	if err := util.ValidateNodePublishVolumeRequest(req); err != nil {
		return nil, err
	}

	stagingTargetPath := req.GetStagingTargetPath()
	targetPath := req.GetTargetPath()
	volID := fsutil.VolumeID(req.GetVolumeId())

	// Considering that kubelet serializes the stage and publish operations,
	// we don't need any extra locking in NodePublishVolume.
	if err := util.CreateMountPoint(targetPath); err != nil {
		log.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	if err := ns.tryRestoreFuseMountsInNodePublish(
		ctx,
		volID,
		stagingTargetPath,
		targetPath,
		req.GetVolumeContext(),
	); err != nil {
		return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
	}

	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}

	mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability())

	// Ensure staging target path is a mountpoint.
	if isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath); err != nil {
		log.ErrorLog(ctx, "stat failed: %v", err)

		return nil, status.Error(codes.Internal, err.Error())
	} else if !isMnt {
		return nil, status.Errorf(
			codes.Internal, "staging path %s for volume %s is not a mountpoint", stagingTargetPath, volID,
		)
	}

	// Check if the volume is already mounted
	isMnt, err := util.IsMountPoint(ns.Mounter, targetPath)
	if err != nil {
		log.ErrorLog(ctx, "stat failed: %v", err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		log.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)

		return &csi.NodePublishVolumeResponse{}, nil
	}

	// It's not, mount now
	if err = mounter.BindMount(
		ctx,
		stagingTargetPath,
		targetPath,
		req.GetReadonly(),
		mountOptions); err != nil {
		log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	log.DebugLog(ctx, "cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)

	return &csi.NodePublishVolumeResponse{}, nil
}

// NodeUnpublishVolume unmounts the volume from the target path.
func (ns *NodeServer) NodeUnpublishVolume(
	ctx context.Context,
	req *csi.NodeUnpublishVolumeRequest,
) (*csi.NodeUnpublishVolumeResponse, error) {
	var err error
	if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil {
		return nil, err
	}

	// Considering that kubelet ensures node operations like unpublish/unstage
	// cannot be called at the same time, explicit locking at the time of
	// NodeUnpublishVolume is not required.
	targetPath := req.GetTargetPath()
	isMnt, err := util.IsMountPoint(ns.Mounter, targetPath)
	if err != nil {
		log.ErrorLog(ctx, "stat failed: %v", err)

		if os.IsNotExist(err) {
			// targetPath has already been deleted
			log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)

			return &csi.NodeUnpublishVolumeResponse{}, nil
		}

		if !util.IsCorruptedMountError(err) {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// Corrupted mounts need to be unmounted properly too,
		// regardless of the mounter used. Continue as normal.
		log.DebugLog(ctx, "cephfs: detected corrupted mount in publish target path %s, trying to unmount anyway", targetPath)
		isMnt = true
	}
	if !isMnt {
		if err = os.RemoveAll(targetPath); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		return &csi.NodeUnpublishVolumeResponse{}, nil
	}

	// Unmount the bind-mount
	if err = mounter.UnmountVolume(ctx, targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	err = os.Remove(targetPath)
	if err != nil && !os.IsNotExist(err) {
		return nil, status.Error(codes.Internal, err.Error())
	}

	log.DebugLog(ctx, "cephfs: successfully unbound volume %s from %s", req.GetVolumeId(), targetPath)

	return &csi.NodeUnpublishVolumeResponse{}, nil
}

// NodeUnstageVolume unstages the volume from the staging path.
func (ns *NodeServer) NodeUnstageVolume(
	ctx context.Context,
	req *csi.NodeUnstageVolumeRequest,
) (*csi.NodeUnstageVolumeResponse, error) {
	var err error
	if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
		return nil, err
	}

	volID := req.GetVolumeId()
	if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer ns.VolumeLocks.Release(volID)

	stagingTargetPath := req.GetStagingTargetPath()

	if err = fsutil.RemoveNodeStageMountinfo(fsutil.VolumeID(volID)); err != nil {
		log.ErrorLog(ctx, "cephfs: failed to remove NodeStageMountinfo for volume %s: %v", volID, err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
	if err != nil {
		log.ErrorLog(ctx, "stat failed: %v", err)

		if os.IsNotExist(err) {
			// stagingTargetPath has already been deleted
			log.DebugLog(ctx, "stagingTargetPath: %s has already been deleted", stagingTargetPath)

			return &csi.NodeUnstageVolumeResponse{}, nil
		}

		if !util.IsCorruptedMountError(err) {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// Corrupted mounts need to be unmounted properly too,
		// regardless of the mounter used. Continue as normal.
		log.DebugLog(ctx,
			"cephfs: detected corrupted mount in staging target path %s, trying to unmount anyway",
			stagingTargetPath)
		isMnt = true
	}
	if !isMnt {
		return &csi.NodeUnstageVolumeResponse{}, nil
	}

	// Unmount the volume
	if err = mounter.UnmountAll(ctx, stagingTargetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	log.DebugLog(ctx, "cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)

	return &csi.NodeUnstageVolumeResponse{}, nil
}

// NodeGetCapabilities returns the supported capabilities of the node server.
func (ns *NodeServer) NodeGetCapabilities(
	ctx context.Context,
	req *csi.NodeGetCapabilitiesRequest,
) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
					},
				},
			},
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
					},
				},
			},
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
					},
				},
			},
		},
	}, nil
}

// NodeGetVolumeStats returns volume stats.
func (ns *NodeServer) NodeGetVolumeStats(
	ctx context.Context,
	req *csi.NodeGetVolumeStatsRequest,
) (*csi.NodeGetVolumeStatsResponse, error) {
	var err error
	targetPath := req.GetVolumePath()
	if targetPath == "" {
		err = fmt.Errorf("targetpath %v is empty", targetPath)

		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	stat, err := os.Stat(targetPath)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "failed to get stat for targetpath %q: %v", targetPath, err)
	}
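
	// CephFS volumes are mounted as filesystems (directories); this driver
	// does not support raw block volumes, so anything else is invalid.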
	if stat.Mode().IsDir() {
		return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
	}

	return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)
}