ceph-csi/pkg/cephfs/nodeserver.go

/*
Copyright 2018 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs

import (
	"context"
	"fmt"
	"os"

	csicommon "github.com/ceph/ceph-csi/pkg/csi-common"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/klog"
	"k8s.io/utils/keymutex"
)

// NodeServer is the ceph CSI driver's node server, implementing the
// supported methods of the CSI node service spec.
type NodeServer struct {
	*csicommon.DefaultNodeServer
}
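
// mtxNodeVolumeID serializes NodeStageVolume calls per volume ID, so that
// two concurrent stage requests for the same volume cannot race on the
// staging mount. keymutex.NewHashed(0) picks the number of hash buckets from
// the CPU count, so distinct volume IDs may share a lock; that only costs
// some extra serialization, never correctness. The usage pattern is:
//
//	mtxNodeVolumeID.LockKey(string(volID))
//	defer mustUnlock(mtxNodeVolumeID, string(volID))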
var (
	mtxNodeVolumeID = keymutex.NewHashed(0)
)

func getCredentialsForVolume(volOptions *volumeOptions, volID volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
	var (
		cr      *credentials
		secrets = req.GetSecrets()
	)

	if volOptions.ProvisionVolume {
		// The volume is provisioned dynamically, so get the credentials directly from Ceph.
		// First, get the admin credentials - those are needed for retrieving the user credentials.
		adminCr, err := getAdminCredentials(secrets)
		if err != nil {
			return nil, fmt.Errorf("failed to get admin credentials from node stage secrets: %v", err)
		}

		// Then get the ceph user entity.
		entity, err := getCephUser(volOptions, adminCr, volID)
		if err != nil {
			return nil, fmt.Errorf("failed to get ceph user: %v", err)
		}

		cr = entity.toCredentials()
	} else {
		// The volume is pre-provisioned; its credentials are in the node stage secrets.
		userCr, err := getUserCredentials(secrets)
		if err != nil {
			return nil, fmt.Errorf("failed to get user credentials from node stage secrets: %v", err)
		}

		cr = userCr
	}

	return cr, nil
}
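
// Note: getAdminCredentials and getUserCredentials (defined elsewhere in this
// package) read well-known keys from the CSI secrets map; in this version of
// the plugin these are expected to be the "adminID"/"adminKey" and
// "userID"/"userKey" pairs. A pre-provisioned volume therefore only needs the
// user pair in its node stage secret.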

// NodeStageVolume mounts the volume to a staging path on the node.
func (ns *NodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	var (
		volOptions *volumeOptions
		vid        *volumeIdentifier
	)

	if err := validateNodeStageVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// Configuration
	stagingTargetPath := req.GetStagingTargetPath()
	volID := volumeID(req.GetVolumeId())

	volOptions, vid, err := newVolumeOptionsFromVolID(string(volID), req.GetVolumeContext(), req.GetSecrets())
	if err != nil {
		if _, ok := err.(ErrInvalidVolID); !ok {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// check for pre-provisioned volumes (plugin versions > 1.0.0)
		volOptions, vid, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
		if err != nil {
			if _, ok := err.(ErrNonStaticVolume); !ok {
				return nil, status.Error(codes.Internal, err.Error())
			}

			// check for volumes from plugin versions <= 1.0.0
			volOptions, vid, err = newVolumeOptionsFromVersion1Context(string(volID), req.GetVolumeContext(),
				req.GetSecrets())
			if err != nil {
				return nil, status.Error(codes.Internal, err.Error())
			}
		}
	}
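
	// NodeStageVolume must be idempotent per the CSI spec: the per-volume
	// lock and the mount-point check below let a retried stage request
	// return success without mounting the same volume twice.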
	if err = createMountPoint(stagingTargetPath); err != nil {
		klog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volID, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	mtxNodeVolumeID.LockKey(string(volID))
	defer mustUnlock(mtxNodeVolumeID, string(volID))

	// Check if the volume is already mounted
	isMnt, err := isMountPoint(stagingTargetPath)
	if err != nil {
		klog.Errorf("stat failed: %v", err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		klog.Infof("cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
		return &csi.NodeStageVolumeResponse{}, nil
	}

	// It's not mounted yet, mount it now
	if err = ns.mount(volOptions, req, vid); err != nil {
		return nil, err
	}

	klog.Infof("cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)

	return &csi.NodeStageVolumeResponse{}, nil
}

func (*NodeServer) mount(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest, vid *volumeIdentifier) error {
	stagingTargetPath := req.GetStagingTargetPath()
	volID := volumeID(req.GetVolumeId())

	cr, err := getCredentialsForVolume(volOptions, volumeID(vid.FsSubvolName), req)
	if err != nil {
		klog.Errorf("failed to get ceph credentials for volume %s: %v", volID, err)
		return status.Error(codes.Internal, err.Error())
	}
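
	// newMounter picks the mounter implementation (kernel CephFS client or
	// ceph-fuse) based on volOptions.Mounter; both satisfy a common
	// interface, so the rest of this function is mounter-agnostic.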
	m, err := newMounter(volOptions)
	if err != nil {
		klog.Errorf("failed to create mounter for volume %s: %v", volID, err)
		return status.Error(codes.Internal, err.Error())
	}

	klog.V(4).Infof("cephfs: mounting volume %s with %s", volID, m.name())

	if err = m.mount(stagingTargetPath, cr, volOptions); err != nil {
		klog.Errorf("failed to mount volume %s: %v", volID, err)
		return status.Error(codes.Internal, err.Error())
	}

	if err := volumeMountCache.nodeStageVolume(req.GetVolumeId(), stagingTargetPath, volOptions.Mounter, req.GetSecrets()); err != nil {
		klog.Warningf("mount-cache: failed to stage volume %s %s: %v", volID, stagingTargetPath, err)
	}

	return nil
}

// NodePublishVolume mounts the volume from the staging path to the target
// path.
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	mountOptions := []string{"bind"}
	if err := validateNodePublishVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// Configuration
	targetPath := req.GetTargetPath()
	volID := req.GetVolumeId()

	if err := createMountPoint(targetPath); err != nil {
		klog.Errorf("failed to create mount point at %s: %v", targetPath, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	volCap := req.GetVolumeCapability()

	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}

	if m := volCap.GetMount(); m != nil {
		hasOption := func(options []string, opt string) bool {
			for _, o := range options {
				if o == opt {
					return true
				}
			}
			return false
		}
		for _, f := range m.MountFlags {
			if !hasOption(mountOptions, f) {
				mountOptions = append(mountOptions, f)
			}
		}
	}

	// Check if the volume is already mounted
	isMnt, err := isMountPoint(targetPath)
	if err != nil {
		klog.Errorf("stat failed: %v", err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		klog.Infof("cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
		return &csi.NodePublishVolumeResponse{}, nil
	}
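
	// The staging mount is shared: publishing only bind-mounts it into the
	// pod's target path, so several pods on this node can use the same
	// CephFS mount without mounting the filesystem again.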

	// It's not mounted yet, bind-mount it now
	if err = bindMount(req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
		klog.Errorf("failed to bind-mount volume %s: %v", volID, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err := volumeMountCache.nodePublishVolume(volID, targetPath, req.GetReadonly()); err != nil {
		klog.Warningf("mount-cache: failed to publish volume %s %s: %v", volID, targetPath, err)
	}

	klog.Infof("cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)

	return &csi.NodePublishVolumeResponse{}, nil
}

// NodeUnpublishVolume unmounts the volume from the target path.
func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	var err error
	if err = validateNodeUnpublishVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	targetPath := req.GetTargetPath()
	volID := req.GetVolumeId()

	if err = volumeMountCache.nodeUnPublishVolume(volID, targetPath); err != nil {
		klog.Warningf("mount-cache: failed to unpublish volume %s %s: %v", volID, targetPath, err)
	}

	// Unmount the bind-mount
	if err = unmountVolume(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err = os.Remove(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	klog.Infof("cephfs: successfully unbound volume %s from %s", volID, targetPath)

	return &csi.NodeUnpublishVolumeResponse{}, nil
}
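
// Note that both unpublish (above) and unstage (below) first unmount and then
// os.Remove the now-empty mount point directory, so nothing is left behind on
// the node once the volume is fully detached.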

// NodeUnstageVolume unstages the volume from the staging path.
func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	var err error
	if err = validateNodeUnstageVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	stagingTargetPath := req.GetStagingTargetPath()
	volID := req.GetVolumeId()

	if err = volumeMountCache.nodeUnStageVolume(volID); err != nil {
		klog.Warningf("mount-cache: failed to unstage volume %s %s: %v", volID, stagingTargetPath, err)
	}

	// Unmount the volume
	if err = unmountVolume(stagingTargetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err = os.Remove(stagingTargetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	klog.Infof("cephfs: successfully unmounted volume %s from %s", volID, stagingTargetPath)

	return &csi.NodeUnstageVolumeResponse{}, nil
}

// NodeGetCapabilities returns the supported capabilities of the node server.
func (ns *NodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
					},
				},
			},
		},
	}, nil
}
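
// Advertising STAGE_UNSTAGE_VOLUME tells the container orchestrator to call
// NodeStageVolume/NodeUnstageVolume around NodePublishVolume and
// NodeUnpublishVolume, enabling the shared staging mount used above.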