ceph-csi/internal/cephfs/nodeserver.go

/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs

import (
"context"
"errors"
"fmt"
"os"
"strings"
2018-03-05 11:59:47 +00:00
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/container-storage-interface/spec/lib/go/csi"
2018-03-05 11:59:47 +00:00
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
2018-03-05 11:59:47 +00:00
)
// NodeServer struct of ceph CSI driver with supported methods of CSI
// node server spec.
type NodeServer struct {
*csicommon.DefaultNodeServer
// A map storing all volumes with ongoing operations so that additional operations
// for that same volume (as defined by VolumeID) return an Aborted error
VolumeLocks *util.VolumeLocks
}
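// getCredentialsForVolume returns the credentials used to mount the volume:
// admin credentials for dynamically provisioned volumes, user credentials for
// pre-provisioned (static) volumes.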
func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
err error
cr *util.Credentials
secrets = req.GetSecrets()
)
if volOptions.ProvisionVolume {
// The volume is provisioned dynamically, use passed in admin credentials
cr, err = util.NewAdminCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
}
} else {
// The volume is pre-made, credentials are in node stage secrets
cr, err = util.NewUserCredentials(secrets)
if err != nil {
return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
}
}

return cr, nil
}
// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
var (
volOptions *volumeOptions
)
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
// Configuration
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
}
defer ns.VolumeLocks.Release(req.GetVolumeId())
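// Resolve the volume options: try the journal-based VolumeID encoding first,
// then fall back to statically provisioned volumes, and finally to the legacy
// volume context used by plugin versions <= 1.0.0.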
volOptions, _, err := newVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
if err != nil {
var eivi ErrInvalidVolID
if !errors.As(err, &eivi) {
return nil, status.Error(codes.Internal, err.Error())
}

// check for pre-provisioned volumes (plugin versions > 1.0.0)
volOptions, _, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
if err != nil {
var ensv ErrNonStaticVolume
if !errors.As(err, &ensv) {
return nil, status.Error(codes.Internal, err.Error())
}
// check for volumes from plugin versions <= 1.0.0
volOptions, _, err = newVolumeOptionsFromVersion1Context(string(volID), req.GetVolumeContext(),
req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
}

// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
klog.Errorf(util.Log(ctx, "stat failed: %v"), err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s is already mounted to %s, skipping"), volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
// It's not, mount now
if err = ns.mount(ctx, volOptions, req); err != nil {
return nil, err
}

klog.V(4).Infof(util.Log(ctx, "cephfs: successfully mounted volume %s to %s"), volID, stagingTargetPath)
return &csi.NodeStageVolumeResponse{}, nil
}
func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
cr, err := getCredentialsForVolume(volOptions, req)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to get ceph credentials for volume %s: %v"), volID, err)
return status.Error(codes.Internal, err.Error())
}
defer cr.DeleteCredentials()
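// newMounter picks the CephFS mounter for this volume; depending on the volume
// options and driver configuration this is either the kernel client or ceph-fuse.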
m, err := newMounter(volOptions)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to create mounter for volume %s: %v"), volID, err)
return status.Error(codes.Internal, err.Error())
}
klog.V(4).Infof(util.Log(ctx, "cephfs: mounting volume %s with %s"), volID, m.name())
readOnly := "ro"
fuseMountOptions := strings.Split(volOptions.FuseMountOptions, ",")
kernelMountOptions := strings.Split(volOptions.KernelMountOptions, ",")
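// For read-only access modes, make sure the mounter-specific options include
// "ro" so the volume is staged read-only.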
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
switch m.(type) {
case *fuseMounter:
if !csicommon.MountOptionContains(strings.Split(volOptions.FuseMountOptions, ","), readOnly) {
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, readOnly)
fuseMountOptions = append(fuseMountOptions, readOnly)
}
case *kernelMounter:
if !csicommon.MountOptionContains(strings.Split(volOptions.KernelMountOptions, ","), readOnly) {
volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, readOnly)
kernelMountOptions = append(kernelMountOptions, readOnly)
}
}
}
if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to mount volume %s: %v. Check dmesg logs if required."), volID, err)
return status.Error(codes.Internal, err.Error())
}
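// For read-write mounts, open up permissions on the staging path so non-root
// pods can write to the volume; roll back the mount if this fails.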
if !csicommon.MountOptionContains(kernelMountOptions, readOnly) && !csicommon.MountOptionContains(fuseMountOptions, readOnly) {
// #nosec - allow anyone to write inside the stagingtarget path
err = os.Chmod(stagingTargetPath, 0777)
if err != nil {
klog.Errorf(util.Log(ctx, "failed to change stagingtarget path %s permission for volume %s: %v"), stagingTargetPath, volID, err)
uErr := unmountVolume(ctx, stagingTargetPath)
if uErr != nil {
klog.Errorf(util.Log(ctx, "failed to umount stagingtarget path %s for volume %s: %v"), stagingTargetPath, volID, uErr)
}
return status.Error(codes.Internal, err.Error())
}
}
return nil
}
// NodePublishVolume mounts the volume mounted to the staging path to the target
// path
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
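// Default options for the bind-mount from the staging path; "_netdev" marks the
// mount as network-dependent so it is only handled once networking is available.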
mountOptions := []string{"bind", "_netdev"}
if err := util.ValidateNodePublishVolumeRequest(req); err != nil {
return nil, err
}
targetPath := req.GetTargetPath()
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
if err := util.CreateMountPoint(targetPath); err != nil {
klog.Errorf(util.Log(ctx, "failed to create mount point at %s: %v"), targetPath, err)
return nil, status.Error(codes.Internal, err.Error())
}
if req.GetReadonly() {
mountOptions = append(mountOptions, "ro")
}
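// Merge mount flags requested via the volume capability into the bind-mount options.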
mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability())
// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
klog.Errorf(util.Log(ctx, "stat failed: %v"), err)
return nil, status.Error(codes.Internal, err.Error())
}
if isMnt {
klog.V(4).Infof(util.Log(ctx, "cephfs: volume %s is already bind-mounted to %s"), volID, targetPath)
return &csi.NodePublishVolumeResponse{}, nil
}
// It's not, mount now
if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
klog.Errorf(util.Log(ctx, "failed to bind-mount volume %s: %v"), volID, err)
return nil, status.Error(codes.Internal, err.Error())
}

klog.V(4).Infof(util.Log(ctx, "cephfs: successfully bind-mounted volume %s to %s"), volID, targetPath)

return &csi.NodePublishVolumeResponse{}, nil
}
// NodeUnpublishVolume unmounts the volume from the target path
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
var err error
if err = util.ValidateNodeUnpublishVolumeRequest(req); err != nil {
return nil, err
}
volID := req.GetVolumeId()
targetPath := req.GetTargetPath()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
// Unmount the bind-mount
if err = unmountVolume(ctx, targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
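// Remove the target path; a path that is already gone is not treated as an error.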
err = os.Remove(targetPath)
if err != nil && !os.IsNotExist(err) {
return nil, status.Error(codes.Internal, err.Error())
}

klog.V(4).Infof(util.Log(ctx, "cephfs: successfully unbound volume %s from %s"), volID, targetPath)
return &csi.NodeUnpublishVolumeResponse{}, nil
}
// NodeUnstageVolume unstages the volume from the staging path
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
var err error
if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
return nil, err
}
volID := req.GetVolumeId()
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
klog.Errorf(util.Log(ctx, util.VolumeOperationAlreadyExistsFmt), volID)
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
}
defer ns.VolumeLocks.Release(volID)
stagingTargetPath := req.GetStagingTargetPath()
// Unmount the volume
if err = unmountVolume(ctx, stagingTargetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

klog.V(4).Infof(util.Log(ctx, "cephfs: successfully unmounted volume %s from %s"), volID, stagingTargetPath)
return &csi.NodeUnstageVolumeResponse{}, nil
}
// NodeGetCapabilities returns the supported capabilities of the node server
func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{
{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
},
},
},
{
Type: &csi.NodeServiceCapability_Rpc{
Rpc: &csi.NodeServiceCapability_RPC{
Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
},
},
},
},
}, nil
}