/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cephfs

import (
	"context"
	"os"

	"github.com/golang/glog"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/container-storage-interface/spec/lib/go/csi/v0"
	"github.com/kubernetes-csi/drivers/pkg/csi-common"
)

type nodeServer struct {
	*csicommon.DefaultNodeServer
}
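
// getOrCreateUser resolves the ceph credentials used to mount volId. For
// dynamically provisioned volumes it creates a dedicated ceph user with the
// admin credentials taken from the NodeStageVolume secrets and records the
// volume in nodeCache; for pre-provisioned volumes it reuses the user
// credentials supplied in the request. In both cases the resulting user
// credentials are also stored via storeCephCredentials.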
func getOrCreateUser(volOptions *volumeOptions, volId volumeID, req *csi.NodeStageVolumeRequest) (*credentials, error) {
	var (
		userCr = &credentials{}
		err    error
	)

	// Retrieve the credentials (possibly create a new user as well)

	if volOptions.ProvisionVolume {
		// The volume is provisioned dynamically, create a dedicated user

		// First, store admin credentials - those are needed for creating a user
		adminCr, err := getAdminCredentials(req.GetNodeStageSecrets())
		if err != nil {
			return nil, err
		}

		if err = storeCephCredentials(volId, adminCr); err != nil {
			return nil, err
		}

		nodeCache.insert(volId, &nodeCacheEntry{volOptions: volOptions, cephAdminID: adminCr.id})

		// Then create the user
		if ent, err := createCephUser(volOptions, adminCr, volId); err != nil {
			return nil, err
		} else {
			userCr.id = ent.Entity[len(cephEntityClientPrefix):]
			userCr.key = ent.Key
		}

		// Set the correct volume root path
		volOptions.RootPath = getVolumeRootPath_ceph(volId)
	} else {
		// The volume is pre-made, credentials are supplied by the user
		userCr, err = getUserCredentials(req.GetNodeStageSecrets())
		if err != nil {
			return nil, err
		}

		nodeCache.insert(volId, &nodeCacheEntry{volOptions: volOptions})
	}

	if err = storeCephCredentials(volId, userCr); err != nil {
		return nil, err
	}

	return userCr, nil
}
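
// NodeStageVolume mounts the volume at the staging target path. It writes the
// ceph config for the volume, skips volumes that are already mounted, and
// otherwise resolves credentials via getOrCreateUser and mounts the volume
// with the mounter returned by newMounter.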
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	if err := validateNodeStageVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// Configuration

	stagingTargetPath := req.GetStagingTargetPath()
	volId := volumeID(req.GetVolumeId())

	volOptions, err := newVolumeOptions(req.GetVolumeAttributes())
	if err != nil {
		glog.Errorf("error reading volume options for volume %s: %v", volId, err)
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	if err = createMountPoint(stagingTargetPath); err != nil {
		glog.Errorf("failed to create staging mount point at %s for volume %s: %v", stagingTargetPath, volId, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	cephConf := cephConfigData{Monitors: volOptions.Monitors, VolumeID: volId}
	if err = cephConf.writeToFile(); err != nil {
		glog.Errorf("failed to write ceph config file to %s for volume %s: %v", getCephConfPath(volId), volId, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	// Check if the volume is already mounted

	isMnt, err := isMountPoint(stagingTargetPath)
	if err != nil {
		glog.Errorf("stat failed: %v", err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		glog.Infof("cephfs: volume %s is already mounted to %s, skipping", volId, stagingTargetPath)
		return &csi.NodeStageVolumeResponse{}, nil
	}

	// It's not, mount now

	cr, err := getOrCreateUser(volOptions, volId, req)
	if err != nil {
		glog.Error(err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	m, err := newMounter(volOptions)
	if err != nil {
		glog.Errorf("failed to create mounter for volume %s: %v", volId, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	glog.V(4).Infof("cephfs: mounting volume %s with %s", volId, m.name())

	if err = m.mount(stagingTargetPath, cr, volOptions, volId); err != nil {
		glog.Errorf("failed to mount volume %s: %v", volId, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	glog.Infof("cephfs: successfully mounted volume %s to %s", volId, stagingTargetPath)

	return &csi.NodeStageVolumeResponse{}, nil
}
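
// NodePublishVolume bind-mounts the staged volume onto the target path. If the
// target path is already a mount point, the request is treated as a no-op.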
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	if err := validateNodePublishVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	// Configuration

	targetPath := req.GetTargetPath()
	volId := req.GetVolumeId()

	if err := createMountPoint(targetPath); err != nil {
		glog.Errorf("failed to create mount point at %s: %v", targetPath, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	// Check if the volume is already mounted

	isMnt, err := isMountPoint(targetPath)
	if err != nil {
		glog.Errorf("stat failed: %v", err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if isMnt {
		glog.Infof("cephfs: volume %s is already bind-mounted to %s", volId, targetPath)
		return &csi.NodePublishVolumeResponse{}, nil
	}

	// It's not, mount now

	if err = bindMount(req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly()); err != nil {
		glog.Errorf("failed to bind-mount volume %s: %v", volId, err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	glog.Infof("cephfs: successfully bind-mounted volume %s to %s", volId, targetPath)

	return &csi.NodePublishVolumeResponse{}, nil
}
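
// NodeUnpublishVolume unmounts the bind-mount at the target path and removes
// the target directory.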
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	if err := validateNodeUnpublishVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	targetPath := req.GetTargetPath()

	// Unmount the bind-mount
	if err := unmountVolume(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	os.Remove(targetPath)

	glog.Infof("cephfs: successfully unbound volume %s from %s", req.GetVolumeId(), targetPath)

	return &csi.NodeUnpublishVolumeResponse{}, nil
}
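
// NodeUnstageVolume unmounts the volume from the staging target path, removes
// the staging directory and drops the volume from nodeCache. For dynamically
// provisioned volumes it also deletes the ceph user created in NodeStageVolume;
// if that fails, the cache entry is reinserted so the operation can be retried.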
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	if err := validateNodeUnstageVolumeRequest(req); err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	volId := volumeID(req.GetVolumeId())
	stagingTargetPath := req.GetStagingTargetPath()

	// Unmount the volume
	if err := unmountVolume(stagingTargetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	os.Remove(stagingTargetPath)

	ent, err := nodeCache.pop(volId)
	if err != nil {
		glog.Error(err)
		return nil, status.Error(codes.Internal, err.Error())
	}

	if ent.volOptions.ProvisionVolume {
		// We've created a dedicated Ceph user in NodeStageVolume; delete it here.

		if err = deleteCephUser(&credentials{id: ent.cephAdminID}, volId); err != nil {
			glog.Errorf("failed to delete ceph user %s for volume %s: %v", getCephUserName(volId), volId, err)

			// Reinsert cache entry for retry
			nodeCache.insert(volId, ent)

			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	glog.Infof("cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)

	return &csi.NodeUnstageVolumeResponse{}, nil
}
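
// NodeGetCapabilities advertises the STAGE_UNSTAGE_VOLUME capability.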
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
					},
				},
			},
		},
	}, nil
}