2022-02-25 12:16:15 +00:00
|
|
|
/*
|
|
|
|
Copyright 2022 The Ceph-CSI Authors.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package controller
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
fscore "github.com/ceph/ceph-csi/internal/cephfs/core"
|
|
|
|
"github.com/ceph/ceph-csi/internal/cephfs/store"
|
|
|
|
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
|
2022-02-25 12:16:15 +00:00
|
|
|
"github.com/ceph/ceph-csi/internal/util"
|
|
|
|
|
|
|
|
"github.com/container-storage-interface/spec/lib/go/csi"
|
|
|
|
)
|
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
const (
	// clusterNameKey is the key in OMAP that contains the name of the
	// NFS-cluster. It will be prefixed with the journal configuration.
	// The value stored under this key is written by setNFSCluster and
	// read back by getNFSCluster.
	clusterNameKey = "nfs.cluster"
)
|
|
|
|
|
2022-02-25 12:16:15 +00:00
|
|
|
// NFSVolume presents the API for consumption by the CSI-controller to create,
// modify and delete the NFS-exported CephFS volume. Instances of this struct
// are short lived, they only exist as long as a CSI-procedure is active.
type NFSVolume struct {
	// ctx is the context for this short living volume object
	ctx context.Context

	// volumeID is the complete CSI volume ID as passed to NewNFSVolume.
	volumeID string
	// clusterID, fscID and objectUUID are decomposed from the volumeID
	// (see util.CSIIdentifier); mons is resolved from the CSI config
	// file in Connect().
	clusterID  string
	mons       string
	fscID      int64
	objectUUID string

	// TODO: drop in favor of a go-ceph connection
	cr *util.Credentials
	// connected tracks whether Connect() succeeded; reset by Destroy().
	connected bool
	conn      *util.ClusterConnection
}
|
|
|
|
|
|
|
|
// NewNFSVolume create a new NFSVolume instance for the currently executing
|
|
|
|
// CSI-procedure.
|
2022-03-18 09:49:03 +00:00
|
|
|
func NewNFSVolume(ctx context.Context, volumeID string) (*NFSVolume, error) {
|
2022-02-25 12:16:15 +00:00
|
|
|
vi := util.CSIIdentifier{}
|
|
|
|
|
|
|
|
err := vi.DecomposeCSIID(volumeID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("error decoding volume ID (%s): %w", volumeID, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &NFSVolume{
|
2022-03-18 09:51:37 +00:00
|
|
|
ctx: ctx,
|
|
|
|
volumeID: volumeID,
|
|
|
|
clusterID: vi.ClusterID,
|
|
|
|
fscID: vi.LocationID,
|
|
|
|
objectUUID: vi.ObjectUUID,
|
|
|
|
conn: &util.ClusterConnection{},
|
2022-02-25 12:16:15 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// String returns a simple/short representation of the NFSVolume.
|
|
|
|
func (nv *NFSVolume) String() string {
|
|
|
|
return nv.volumeID
|
|
|
|
}
|
|
|
|
|
|
|
|
// Connect fetches cluster connection details (like MONs) and connects to the
|
|
|
|
// Ceph cluster. This uses go-ceph, so after Connect(), Destroy() should be
|
|
|
|
// called to cleanup resources.
|
|
|
|
func (nv *NFSVolume) Connect(cr *util.Credentials) error {
|
2022-03-18 09:51:37 +00:00
|
|
|
if nv.connected {
|
|
|
|
return nil
|
|
|
|
}
|
2022-02-25 12:16:15 +00:00
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
var err error
|
|
|
|
nv.mons, err = util.Mons(util.CsiConfigFile, nv.clusterID)
|
2022-02-25 12:16:15 +00:00
|
|
|
if err != nil {
|
2022-03-18 09:51:37 +00:00
|
|
|
return fmt.Errorf("failed to get MONs for cluster (%s): %w", nv.clusterID, err)
|
2022-02-25 12:16:15 +00:00
|
|
|
}
|
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
err = nv.conn.Connect(nv.mons, cr)
|
2022-02-25 12:16:15 +00:00
|
|
|
if err != nil {
|
2022-03-18 09:51:37 +00:00
|
|
|
return fmt.Errorf("failed to connect to cluster: %w", err)
|
2022-02-25 12:16:15 +00:00
|
|
|
}
|
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
nv.cr = cr
|
2022-02-25 12:16:15 +00:00
|
|
|
nv.connected = true
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
// Destroy cleans up resources once the NFSVolume instance is not needed
|
|
|
|
// anymore.
|
|
|
|
func (nv *NFSVolume) Destroy() {
|
|
|
|
if nv.connected {
|
|
|
|
nv.conn.Destroy()
|
|
|
|
nv.connected = false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-25 12:16:15 +00:00
|
|
|
// GetExportPath returns the path on the NFS-server that can be used for
|
|
|
|
// mounting.
|
|
|
|
func (nv *NFSVolume) GetExportPath() string {
|
|
|
|
return "/" + nv.volumeID
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateExport takes the (CephFS) CSI-volume and instructs Ceph Mgr to create
|
|
|
|
// a new NFS-export for the volume on the Ceph managed NFS-server.
|
|
|
|
func (nv *NFSVolume) CreateExport(backend *csi.Volume) error {
|
|
|
|
if !nv.connected {
|
|
|
|
return fmt.Errorf("can not created export for %q: not connected", nv)
|
|
|
|
}
|
|
|
|
|
|
|
|
fs := backend.VolumeContext["fsName"]
|
|
|
|
nfsCluster := backend.VolumeContext["nfsCluster"]
|
|
|
|
path := backend.VolumeContext["subvolumePath"]
|
|
|
|
|
2022-03-18 09:51:37 +00:00
|
|
|
err := nv.setNFSCluster(nfsCluster)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to set NFS-cluster: %w", err)
|
|
|
|
}
|
|
|
|
|
2022-02-25 12:16:15 +00:00
|
|
|
// ceph nfs export create cephfs ${FS} ${NFS} /${EXPORT} ${SUBVOL_PATH}
|
|
|
|
args := []string{
|
|
|
|
"--id", nv.cr.ID,
|
|
|
|
"--keyfile=" + nv.cr.KeyFile,
|
|
|
|
"-m", nv.mons,
|
|
|
|
"nfs",
|
|
|
|
"export",
|
|
|
|
"create",
|
|
|
|
"cephfs",
|
|
|
|
fs,
|
|
|
|
nfsCluster,
|
|
|
|
nv.GetExportPath(),
|
|
|
|
path,
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: use new go-ceph API
|
2022-03-18 09:49:03 +00:00
|
|
|
_, stderr, err := util.ExecCommand(nv.ctx, "ceph", args...)
|
2022-02-25 12:16:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("executing ceph export command failed (%w): %s", err, stderr)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteExport removes the NFS-export from the Ceph managed NFS-server.
|
|
|
|
func (nv *NFSVolume) DeleteExport() error {
|
|
|
|
if !nv.connected {
|
|
|
|
return fmt.Errorf("can not delete export for %q: not connected", nv)
|
|
|
|
}
|
|
|
|
|
|
|
|
nfsCluster, err := nv.getNFSCluster()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to identify NFS cluster: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ceph nfs export rm <cluster_id> <pseudo_path>
|
|
|
|
args := []string{
|
|
|
|
"--id", nv.cr.ID,
|
|
|
|
"--keyfile=" + nv.cr.KeyFile,
|
|
|
|
"-m", nv.mons,
|
|
|
|
"nfs",
|
|
|
|
"export",
|
|
|
|
"delete",
|
|
|
|
nfsCluster,
|
|
|
|
nv.GetExportPath(),
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: use new go-ceph API
|
2022-03-18 09:49:03 +00:00
|
|
|
_, stderr, err := util.ExecCommand(nv.ctx, "ceph", args...)
|
2022-02-25 12:16:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("executing ceph export command failed (%w): %s", err, stderr)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2022-03-18 09:51:37 +00:00
|
|
|
|
|
|
|
// getNFSCluster fetches the NFS-cluster name from the CephFS journal.
|
|
|
|
func (nv *NFSVolume) getNFSCluster() (string, error) {
|
|
|
|
if !nv.connected {
|
|
|
|
return "", fmt.Errorf("can not get NFS-cluster for %q: not connected", nv)
|
|
|
|
}
|
|
|
|
|
|
|
|
fs := fscore.NewFileSystem(nv.conn)
|
|
|
|
fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Connect to cephfs' default radosNamespace (csi)
|
|
|
|
j, err := store.VolJournal.Connect(nv.mons, fsutil.RadosNamespace, nv.cr)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("failed to connect to journal: %w", err)
|
|
|
|
}
|
|
|
|
defer j.Destroy()
|
|
|
|
|
|
|
|
clusterName, err := j.FetchAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey)
|
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("failed to get cluster name: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return clusterName, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// setNFSCluster stores the NFS-cluster name in the CephFS journal.
|
|
|
|
func (nv *NFSVolume) setNFSCluster(clusterName string) error {
|
|
|
|
if !nv.connected {
|
|
|
|
return fmt.Errorf("can not set NFS-cluster for %q: not connected", nv)
|
|
|
|
}
|
|
|
|
|
|
|
|
fs := fscore.NewFileSystem(nv.conn)
|
|
|
|
fsName, err := fs.GetFsName(nv.ctx, nv.fscID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to get filesystem name for ID %x: %w", nv.fscID, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
mdPool, err := fs.GetMetadataPool(nv.ctx, fsName)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to get metadata pool for %q: %w", fsName, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Connect to cephfs' default radosNamespace (csi)
|
|
|
|
j, err := store.VolJournal.Connect(nv.mons, fsutil.RadosNamespace, nv.cr)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to connect to journal: %w", err)
|
|
|
|
}
|
|
|
|
defer j.Destroy()
|
|
|
|
|
|
|
|
err = j.StoreAttribute(nv.ctx, mdPool, nv.objectUUID, clusterNameKey, clusterName)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to store cluster name: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|