/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import (
	"context"
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"

	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/journal"
	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/log"

	librbd "github.com/ceph/go-ceph/rbd"
	"github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"k8s.io/kubernetes/pkg/volume"
	mount "k8s.io/mount-utils"
	utilexec "k8s.io/utils/exec"
)

// NodeServer struct of ceph rbd driver with supported methods of CSI
// node server spec.
type NodeServer struct {
	*csicommon.DefaultNodeServer
	Mounter mount.Interface
	// VolumeLocks tracks all volumes with ongoing operations so that additional
	// operations for the same volume (as defined by its VolumeID) return an Aborted error
	VolumeLocks *util.VolumeLocks
}

// stageTransaction represents the state a transaction was in when it either
// completed or failed; this state can be used to roll back the transaction.
type stageTransaction struct {
	// isStagePathCreated represents whether the mount path to stage the volume on was created or not
	isStagePathCreated bool
	// isMounted represents if the volume was mounted or not
	isMounted bool
	// isEncrypted represents if the volume was encrypted or not
	isEncrypted bool
	// devicePath represents the path where the rbd device is mapped
	devicePath string
}

const (
	// values for xfsHasReflink.
	xfsReflinkUnset int = iota
	xfsReflinkNoSupport
	xfsReflinkSupport

	staticVol        = "staticVolume"
	volHealerCtx     = "volumeHealerContext"
	tryOtherMounters = "tryOtherMounters"
)
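
// The string constants above are keys looked up in the VolumeContext of a
// NodeStageVolume request. As a purely illustrative (hypothetical) example, a
// pre-provisioned image staged with a VolumeContext containing
// {"staticVolume": "true", "tryOtherMounters": "true"} is treated as a static
// volume and is allowed to fall back to another mounter.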

var (
	kernelRelease = ""
	// deepFlattenSupport holds the list of kernels which support mapping rbd
	// images with the deep-flatten image feature
	// nolint:gomnd // numbers specify Kernel versions.
	deepFlattenSupport = []util.KernelVersion{
		{
			Version:      5,
			PatchLevel:   1,
			SubLevel:     0,
			ExtraVersion: 0,
			Distribution: "",
			Backport:     false,
		}, // standard 5.1+ versions
		{
			Version:      4,
			PatchLevel:   18,
			SubLevel:     0,
			ExtraVersion: 193,
			Distribution: ".el8",
			Backport:     true,
		}, // RHEL 8.2
	}

	// xfsHasReflink is set by xfsSupportsReflink(); use that function when
	// checking for reflink support.
	xfsHasReflink = xfsReflinkUnset
)
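
// To illustrate the table above, assuming util.CheckKernelSupport compares the
// running "uname -r" string against these entries: a mainline kernel such as
// "5.4.0-42-generic" satisfies the 5.1+ entry, while a RHEL 8.2 kernel such as
// "4.18.0-193.28.1.el8_2.x86_64" matches the ".el8" backport entry.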

// parseBoolOption checks if the parameters contain the option and parses it.
// If it is empty or not set, the default is returned.
// nolint:unparam // currently defValue is always false, this can change in the future
func parseBoolOption(ctx context.Context, parameters map[string]string, optionName string, defValue bool) bool {
	boolVal := defValue

	if val, ok := parameters[optionName]; ok {
		var err error
		if boolVal, err = strconv.ParseBool(val); err != nil {
			log.ErrorLog(ctx, "failed to parse value of %q: %q", optionName, val)
		}
	}

	return boolVal
}
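
// A minimal usage sketch for parseBoolOption: with parameters of
// {"volumeHealerContext": "true"}, parseBoolOption(ctx, parameters,
// volHealerCtx, false) returns true; an unparsable value such as "yes" is
// logged and resolves to false (the zero value from strconv.ParseBool).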
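
// Background on the volume healer (summarized from the change that introduced
// it): for rbd-nbd backed volumes a userspace mount daemon runs per volume, and
// a restart of the nodeplugin pod kills those daemons, after which the mounts
// start seeing IO errors. The healer is a one-time activity at rbd nodeplugin
// startup; it walks the volume attachments on the node and, for attached
// nbd-type volumes, replays NodeStageVolume with the stashed volume attributes
// (secrets, device name, image attributes, ...) so that the rbd-nbd daemons are
// started again and the images are reattached to the right nbd devices.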

// healerStageTransaction attempts to attach the rbd image with the device path
// previously updated in the stash file at metaDataPath.
func healerStageTransaction(ctx context.Context, cr *util.Credentials, volOps *rbdVolume, metaDataPath string) error {
	imgInfo, err := lookupRBDImageMetadataStash(metaDataPath)
	if err != nil {
		log.ErrorLog(ctx, "failed to find image metadata, at stagingPath: %s, err: %v", metaDataPath, err)

		return err
	}
	if imgInfo.DevicePath == "" {
		return fmt.Errorf("device is empty in image metadata, at stagingPath: %s", metaDataPath)
	}
	var devicePath string
	devicePath, err = attachRBDImage(ctx, volOps, imgInfo.DevicePath, cr)
	if err != nil {
		return err
	}
	log.DebugLog(ctx, "rbd volID: %s was successfully attached to device: %s", volOps.VolID, devicePath)

	return nil
}

// populateRbdVol updates the fields in the rbdVolume struct based on the
// request it received. It receives both the credentials and the secrets args,
// as they differ in the data they carry: the credentials are used directly by
// functions like volJournal.Connect(), while other functions like
// genVolFromVolumeOptions() make use of the secrets.
func populateRbdVol(
	ctx context.Context,
	req *csi.NodeStageVolumeRequest,
	cr *util.Credentials,
	secrets map[string]string) (*rbdVolume, error) {
	var err error
	var j *journal.Connection
	volID := req.GetVolumeId()
	isBlock := req.GetVolumeCapability().GetBlock() != nil
	disableInUseChecks := false
	// MULTI_NODE_MULTI_WRITER is supported by default for Block access type volumes
	if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER {
		if !isBlock {
			log.WarningLog(
				ctx,
				"MULTI_NODE_MULTI_WRITER currently only supported with volumes of access type `block`,"+
					" invalid AccessMode for volume: %v",
				req.GetVolumeId(),
			)

			return nil, status.Error(
				codes.InvalidArgument,
				"rbd: RWX access mode request is only valid for volumes with access type `block`",
			)
		}

		disableInUseChecks = true
	}

	rv, err := genVolFromVolumeOptions(ctx, req.GetVolumeContext(), secrets, disableInUseChecks, true)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	rv.ThickProvision = isThickProvisionRequest(req.GetVolumeContext())
	isStaticVol := parseBoolOption(ctx, req.GetVolumeContext(), staticVol, false)
	// get rbd image name from the volume journal
	// for static volumes, the image name is actually the volume ID itself
	if isStaticVol {
		if req.GetVolumeContext()[intreeMigrationKey] == intreeMigrationLabel {
			// if migration static volume, use imageName as volID
			volID = req.GetVolumeContext()["imageName"]
		}
		rv.RbdImageName = volID
	} else {
		var vi util.CSIIdentifier
		var imageAttributes *journal.ImageAttributes
		err = vi.DecomposeCSIID(volID)
		if err != nil {
			err = fmt.Errorf("error decoding volume ID (%s): %w", volID, err)

			return nil, status.Error(codes.Internal, err.Error())
		}

		j, err = volJournal.Connect(rv.Monitors, rv.RadosNamespace, cr)
		if err != nil {
			log.ErrorLog(ctx, "failed to establish cluster connection: %v", err)

			return nil, status.Error(codes.Internal, err.Error())
		}
		defer j.Destroy()

		imageAttributes, err = j.GetImageAttributes(
			ctx, rv.Pool, vi.ObjectUUID, false)
		if err != nil {
			err = fmt.Errorf("error fetching image attributes for volume ID (%s): %w", volID, err)

			return nil, status.Error(codes.Internal, err.Error())
		}
		rv.RbdImageName = imageAttributes.ImageName
	}
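
	// As an illustration with hypothetical values: a CSI volume handle such as
	// "0001-0009-rook-ceph-0000000000000002-b0285c97-a0ce-11eb-8c66-0242ac110002"
	// decomposes into the CSI ID version, cluster-ID length, cluster ID, pool ID
	// and the object UUID that is looked up in the journal above.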

	if req.GetVolumeContext()["mounter"] == rbdDefaultMounter &&
		!isKrbdFeatureSupported(ctx, req.GetVolumeContext()["imageFeatures"]) {
		if !parseBoolOption(ctx, req.GetVolumeContext(), tryOtherMounters, false) {
			log.ErrorLog(ctx, "unsupported krbd feature, set `tryOtherMounters:true` or fix the krbd driver")

			return nil, status.Errorf(codes.Internal, "unsupported krbd feature")
		}
		// fall back to rbd-nbd
		rv.Mounter = rbdNbdMounter
	} else {
		rv.Mounter = req.GetVolumeContext()["mounter"]
	}
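
	// getMapOptions parses any mounter-specific map/unmap options from the
	// request. Going by the scheme described when the mounter fallback was
	// introduced, a mapOption such as "krbd:v1,v2;nbd:v3,v4" carries
	// per-mounter sections split by ";", and options without a "krbd:" or
	// "nbd:" prefix apply to the default krbd mounter.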
	err = getMapOptions(req, rv)
	if err != nil {
		return nil, err
	}

	rv.VolID = volID

	rv.LogDir = req.GetVolumeContext()["cephLogDir"]
	if rv.LogDir == "" {
		rv.LogDir = defaultLogDir
	}
	rv.LogStrategy = req.GetVolumeContext()["cephLogStrategy"]
	if rv.LogStrategy == "" {
		rv.LogStrategy = defaultLogStrategy
	}

	return rv, err
}

// NodeStageVolume mounts the volume to a staging path on the node.
// Implementation notes:
// - stagingTargetPath is the directory passed in the request where the volume needs to be staged
// - We stage the volume into a directory, named after the VolumeID inside stagingTargetPath if
//   it is a file system
// - We stage the volume into a file, named after the VolumeID inside stagingTargetPath if it is
//   a block volume
// - Order of operation execution: (useful for defer stacking and when Unstaging to ensure steps
//   are done in reverse, this is done in undoStagingTransaction)
//   - Stash image metadata under staging path
//   - Map the image (creates a device)
//   - Create the staging file/directory under staging path
//   - Stage the device (mount the device mapped for image)
func (ns *NodeServer) NodeStageVolume(
	ctx context.Context,
	req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	var err error
	if err = util.ValidateNodeStageVolumeRequest(req); err != nil {
		return nil, err
	}

	volID := req.GetVolumeId()
	secrets := req.GetSecrets()
	if util.IsMigrationSecret(secrets) {
		secrets, err = util.ParseAndSetSecretMapFromMigSecret(secrets)
		if err != nil {
			return nil, status.Error(codes.InvalidArgument, err.Error())
		}
	}
	cr, err := util.NewUserCredentials(secrets)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	defer cr.DeleteCredentials()

	if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer ns.VolumeLocks.Release(volID)

	stagingParentPath := req.GetStagingTargetPath()
	stagingTargetPath := stagingParentPath + "/" + volID

	isHealer := parseBoolOption(ctx, req.GetVolumeContext(), volHealerCtx, false)
	if !isHealer {
		var isNotMnt bool
		// check if stagingPath is already mounted
		isNotMnt, err = isNotMountPoint(ns.Mounter, stagingTargetPath)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		} else if !isNotMnt {
			log.DebugLog(ctx, "rbd: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)

			return &csi.NodeStageVolumeResponse{}, nil
		}
	}

	isStaticVol := parseBoolOption(ctx, req.GetVolumeContext(), staticVol, false)

	// return an error when the imageFeatures parameter is missing or empty;
	// for backward compatibility, ignore the error for non-static volumes from older cephcsi versions
	if imageFeatures, ok := req.GetVolumeContext()["imageFeatures"]; checkImageFeatures(imageFeatures, ok, isStaticVol) {
		return nil, status.Error(codes.InvalidArgument, "missing required parameter imageFeatures")
	}

	rv, err := populateRbdVol(ctx, req, cr, secrets)
	if err != nil {
		return nil, err
	}

	err = rv.Connect(cr)
	if err != nil {
		log.ErrorLog(ctx, "failed to connect to volume %s: %v", rv, err)

		return nil, status.Error(codes.Internal, err.Error())
	}
	defer rv.Destroy()

	if isHealer {
		err = healerStageTransaction(ctx, cr, rv, stagingParentPath)
		if err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		return &csi.NodeStageVolumeResponse{}, nil
	}

	// Stash image details prior to mapping the image (useful during Unstage as it has no
	// volume options passed to the RPC as per the CSI spec)
	err = stashRBDImageMetadata(rv, stagingParentPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	// perform the actual staging and, if this fails, have undoStagingTransaction
	// clean up for us
	txn, err := ns.stageTransaction(ctx, req, cr, rv, isStaticVol)
	defer func() {
		if err != nil {
			ns.undoStagingTransaction(ctx, req, txn, rv)
		}
	}()
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	log.DebugLog(
		ctx,
		"rbd: successfully mounted volume %s to stagingTargetPath %s",
		volID,
		stagingTargetPath)

	return &csi.NodeStageVolumeResponse{}, nil
}
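
// stageTransaction maps the rbd image, creates the staging mount point and
// mounts the mapped device onto it, recording each completed step in the
// returned stageTransaction so that a failure can be rolled back by
// undoStagingTransaction.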
func (ns *NodeServer) stageTransaction(
	ctx context.Context,
	req *csi.NodeStageVolumeRequest,
	cr *util.Credentials,
	volOptions *rbdVolume,
	staticVol bool) (*stageTransaction, error) {
	transaction := &stageTransaction{}

	var err error
	var readOnly bool

	// Allow the image to be mounted on multiple nodes if it is ROX
	if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY {
		log.ExtendedLog(ctx, "setting disableInUseChecks on rbd volume to: %v", req.GetVolumeId())
		volOptions.DisableInUseChecks = true
		volOptions.readOnly = true
	}

	err = flattenImageBeforeMapping(ctx, volOptions, cr)
	if err != nil {
		return transaction, err
	}

	// Mapping RBD image
	var devicePath string
	devicePath, err = attachRBDImage(ctx, volOptions, devicePath, cr)
	if err != nil {
		return transaction, err
	}
	transaction.devicePath = devicePath

	log.DebugLog(ctx, "rbd image: %s was successfully mapped at %s\n",
		volOptions, devicePath)

	// userspace mounters like nbd need the device path as a reference while
	// restarting the userspace processes on a nodeplugin restart. For kernel
	// mounter (krbd) we don't need it as there won't be any process running
	// in userspace, hence we don't store the device path for krbd devices.
	if volOptions.Mounter == rbdNbdMounter {
		err = updateRBDImageMetadataStash(req.GetStagingTargetPath(), devicePath)
		if err != nil {
			return transaction, err
		}
	}

	if volOptions.isEncrypted() {
		devicePath, err = ns.processEncryptedDevice(ctx, volOptions, devicePath)
		if err != nil {
			return transaction, err
		}
		transaction.isEncrypted = true
	}

	stagingTargetPath := getStagingTargetPath(req)

	isBlock := req.GetVolumeCapability().GetBlock() != nil
	err = ns.createStageMountPoint(ctx, stagingTargetPath, isBlock)
	if err != nil {
		return transaction, err
	}

	transaction.isStagePathCreated = true

	// nodeStage Path
	readOnly, err = ns.mountVolumeToStagePath(ctx, req, staticVol, stagingTargetPath, devicePath)
	if err != nil {
		return transaction, err
	}
	transaction.isMounted = true

	// resize if it is a file-system type static volume.
	if staticVol && !isBlock {
		var ok bool
		resizer := mount.NewResizeFs(utilexec.New())
		ok, err = resizer.NeedResize(devicePath, stagingTargetPath)
		if err != nil {
			return transaction, status.Errorf(codes.Internal,
				"need resize check failed on devicePath %s and stagingPath %s, error: %v",
				devicePath,
				stagingTargetPath,
				err)
		}
		if ok {
			ok, err = resizer.Resize(devicePath, stagingTargetPath)
			if !ok {
				return transaction, status.Errorf(codes.Internal,
					"resize failed on path %s, error: %v", stagingTargetPath, err)
			}
		}
	}
	if !readOnly {
		// #nosec - allow anyone to write inside the target path
		err = os.Chmod(stagingTargetPath, 0o777)
	}

	return transaction, err
}
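
// flattenImageBeforeMapping force-flattens the image when the running kernel
// cannot map rbd images that carry the deep-flatten feature or that are part
// of a clone chain (per the deepFlattenSupport table, mainline krbd gained
// this with 5.1).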
func flattenImageBeforeMapping(
	ctx context.Context,
	volOptions *rbdVolume,
	cr *util.Credentials) error {
	var err error
	var feature bool
	var depth uint

	if kernelRelease == "" {
		// fetch the current running kernel info
		kernelRelease, err = util.GetKernelVersion()
		if err != nil {
			return err
		}
	}
	if !util.CheckKernelSupport(kernelRelease, deepFlattenSupport) && !skipForceFlatten {
		feature, err = volOptions.checkImageChainHasFeature(ctx, librbd.FeatureDeepFlatten)
		if err != nil {
			return err
		}
		depth, err = volOptions.getCloneDepth(ctx)
		if err != nil {
			return err
		}
		if feature || depth != 0 {
			err = volOptions.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
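
// undoStagingTransaction rolls back a failed stageTransaction in reverse
// order: unmount the staging path, remove the created file/directory, unmap
// the rbd device and finally clean up the stashed image metadata.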
func (ns *NodeServer) undoStagingTransaction(
	ctx context.Context,
	req *csi.NodeStageVolumeRequest,
	transaction *stageTransaction,
	volOptions *rbdVolume) {
	var err error

	stagingTargetPath := getStagingTargetPath(req)
	if transaction.isMounted {
		err = ns.Mounter.Unmount(stagingTargetPath)
		if err != nil {
			log.ErrorLog(ctx, "failed to unmount stagingTargetPath: %s with error: %v", stagingTargetPath, err)

			return
		}
	}

	// remove the file/directory created on the staging path
	if transaction.isStagePathCreated {
		err = os.Remove(stagingTargetPath)
		if err != nil {
			log.ErrorLog(ctx, "failed to remove stagingTargetPath: %s with error: %v", stagingTargetPath, err)
			// continue on failure to unmap the image, as leaving stale images causes more issues than a stale
			// file/directory
		}
	}

	volID := req.GetVolumeId()

	// Unmapping rbd device
	if transaction.devicePath != "" {
		err = detachRBDDevice(ctx, transaction.devicePath, volID, volOptions.UnmapOptions, transaction.isEncrypted)
		if err != nil {
			log.ErrorLog(
				ctx,
				"failed to unmap rbd device: %s for volume %s with error: %v",
				transaction.devicePath,
				volID,
				err)
			// continue on failure to delete the stash file, as kubernetes will fail to delete the staging path
			// otherwise
		}
	}

	// Cleanup the stashed image metadata
	if err = cleanupRBDImageMetadataStash(req.GetStagingTargetPath()); err != nil {
		log.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)

		return
	}
}
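
// createStageMountPoint creates the staging target: a file for block volumes
// and a directory for file-system volumes; an already existing directory is
// not treated as an error.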
func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath string, isBlock bool) error {
	if isBlock {
		// #nosec:G304, intentionally creating file mountPath, not a security issue
		pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o600)
		if err != nil {
			log.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)

			return status.Error(codes.Internal, err.Error())
		}
		if err = pathFile.Close(); err != nil {
			log.ErrorLog(ctx, "failed to close mountPath:%s with error: %v", mountPath, err)

			return status.Error(codes.Internal, err.Error())
		}

		return nil
	}

	err := os.Mkdir(mountPath, 0o750)
	if err != nil {
		if !os.IsExist(err) {
			log.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)

			return status.Error(codes.Internal, err.Error())
		}
	}

	return nil
}

// NodePublishVolume mounts the volume mounted to the device path to the target
// path.
func (ns *NodeServer) NodePublishVolume(
	ctx context.Context,
	req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	err := util.ValidateNodePublishVolumeRequest(req)
	if err != nil {
		return nil, err
	}
	targetPath := req.GetTargetPath()
	isBlock := req.GetVolumeCapability().GetBlock() != nil
	stagingPath := req.GetStagingTargetPath()
	volID := req.GetVolumeId()
	stagingPath += "/" + volID

	// Since the kubelet makes sure that stage and publish operations
	// are serialized, we don't need any extra locking in NodePublish

	// Check if that target path exists properly
	notMnt, err := ns.createTargetMountPath(ctx, targetPath, isBlock)
	if err != nil {
		return nil, err
	}

	if !notMnt {
		return &csi.NodePublishVolumeResponse{}, nil
	}

	// Publish Path
	err = ns.mountVolume(ctx, stagingPath, req)
	if err != nil {
		return nil, err
	}

	log.DebugLog(ctx, "rbd: successfully mounted stagingPath %s to targetPath %s", stagingPath, targetPath)

	return &csi.NodePublishVolumeResponse{}, nil
}
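
// mountVolumeToStagePath formats the mapped device when needed and mounts it
// onto the staging path; the returned bool reports whether the volume was
// mounted read-only.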
func (ns *NodeServer) mountVolumeToStagePath(
	ctx context.Context,
	req *csi.NodeStageVolumeRequest,
	staticVol bool,
	stagingPath, devicePath string) (bool, error) {
	readOnly := false
	fsType := req.GetVolumeCapability().GetMount().GetFsType()
	diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}
	// rbd images are thin-provisioned and return zeros for unwritten areas. A freshly created
	// image will not benefit from discard and we also want to avoid as much unnecessary zeroing
	// as possible. Open-code mkfs here because FormatAndMount() doesn't accept custom mkfs
	// options.
	//
	// Note that "freshly" is very important here. While discard is more of a nice to have,
	// lazy_journal_init=1 is plain unsafe if the image has been written to before and hasn't
	// been zeroed afterwards (unlike the name suggests, it leaves the journal completely
	// uninitialized and carries a risk until the journal is overwritten and wraps around for
	// the first time).
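	//
	// To illustrate, for a fresh ext4 volume the open-coded invocation below
	// amounts to: mkfs.ext4 -m0 -Enodiscard,lazy_itable_init=1,lazy_journal_init=1 /dev/rbdX
	// (with /dev/rbdX standing in for the mapped devicePath).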
	existingFormat, err := diskMounter.GetDiskFormat(devicePath)
	if err != nil {
		log.ErrorLog(ctx, "failed to get disk format for path %s, error: %v", devicePath, err)

		return readOnly, err
	}

	opt := []string{"_netdev"}
	opt = csicommon.ConstructMountOptions(opt, req.GetVolumeCapability())
	isBlock := req.GetVolumeCapability().GetBlock() != nil
	rOnly := "ro"

	if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
		req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
		if !csicommon.MountOptionContains(opt, rOnly) {
			opt = append(opt, rOnly)
		}
	}
	if csicommon.MountOptionContains(opt, rOnly) {
		readOnly = true
	}

	if fsType == "xfs" {
		opt = append(opt, "nouuid")
	}

	if existingFormat == "" && !staticVol && !readOnly {
		args := []string{}
		switch fsType {
		case "ext4":
			args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath}
		case "xfs":
			args = []string{"-K", devicePath}
			// always disable reflink
			// TODO: make enabling an option, see ceph/ceph-csi#1256
			if ns.xfsSupportsReflink() {
				args = append(args, "-m", "reflink=0")
			}
		}
		if len(args) > 0 {
			cmdOut, cmdErr := diskMounter.Exec.Command("mkfs."+fsType, args...).CombinedOutput()
			if cmdErr != nil {
				log.ErrorLog(ctx, "failed to run mkfs error: %v, output: %v", cmdErr, string(cmdOut))

				return readOnly, cmdErr
			}
		}
	}

	if isBlock {
		opt = append(opt, "bind")
		err = diskMounter.Mount(devicePath, stagingPath, fsType, opt)
	} else {
		err = diskMounter.FormatAndMount(devicePath, stagingPath, fsType, opt)
	}
	if err != nil {
		log.ErrorLog(ctx,
			"failed to mount device path (%s) to staging path (%s) for volume "+
				"(%s) error: %s Check dmesg logs if required.",
			devicePath,
			stagingPath,
			req.GetVolumeId(),
			err)
	}

	return readOnly, err
}
|
|
|
|
|
2019-08-09 17:11:21 +00:00
|
|
|
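// mountVolume bind-mounts the staged volume at stagingPath onto the target
// (publish) path from the request, honoring read-only and any additional
// mount options from the volume capability.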
func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *csi.NodePublishVolumeRequest) error {
	// Publish Path
	fsType := req.GetVolumeCapability().GetMount().GetFsType()
	readOnly := req.GetReadonly()
	mountOptions := []string{"bind", "_netdev"}
	isBlock := req.GetVolumeCapability().GetBlock() != nil
	targetPath := req.GetTargetPath()

	mountOptions = csicommon.ConstructMountOptions(mountOptions, req.GetVolumeCapability())

	log.DebugLog(ctx, "target %v\nisBlock %v\nfstype %v\nstagingPath %v\nreadonly %v\nmountflags %v\n",
		targetPath, isBlock, fsType, stagingPath, readOnly, mountOptions)

	if readOnly {
		mountOptions = append(mountOptions, "ro")
	}
	if err := util.Mount(stagingPath, targetPath, fsType, mountOptions); err != nil {
		return status.Error(codes.Internal, err.Error())
	}

	return nil
}

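// createTargetMountPath creates the mount target if it does not exist yet: a
// regular file for block volumes, a directory otherwise. It returns whether
// the path is not (yet) a mount point.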
func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath string, isBlock bool) (bool, error) {
	// Check if that mount path exists properly
	notMnt, err := mount.IsNotMountPoint(ns.Mounter, mountPath)
	if err == nil {
		return notMnt, nil
	}
	if !os.IsNotExist(err) {
		return false, status.Error(codes.Internal, err.Error())
	}
	if isBlock {
		// #nosec
		pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o750)
		if e != nil {
			log.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, e)

			return notMnt, status.Error(codes.Internal, e.Error())
		}
		if err = pathFile.Close(); err != nil {
			log.DebugLog(ctx, "Failed to close mountPath:%s with error: %v", mountPath, err)

			return notMnt, status.Error(codes.Internal, err.Error())
		}
	} else {
		// Create a mountpath directory
		if err = util.CreateMountPoint(mountPath); err != nil {
			return notMnt, status.Error(codes.Internal, err.Error())
		}
	}
	notMnt = true

	return notMnt, err
}

// NodeUnpublishVolume unmounts the volume from the target path.
func (ns *NodeServer) NodeUnpublishVolume(
	ctx context.Context,
	req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
	err := util.ValidateNodeUnpublishVolumeRequest(req)
	if err != nil {
		return nil, err
	}

	targetPath := req.GetTargetPath()
	// Considering that kubelet makes sure node operations like
	// unpublish/unstage cannot be called at the same time, explicit locking
	// during NodeUnpublishVolume is not required.
	notMnt, err := mount.IsNotMountPoint(ns.Mounter, targetPath)
	if err != nil {
		if os.IsNotExist(err) {
			// targetPath has already been deleted
			log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)

			return &csi.NodeUnpublishVolumeResponse{}, nil
		}

		return nil, status.Error(codes.NotFound, err.Error())
	}
	if notMnt {
		if err = os.RemoveAll(targetPath); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}

		return &csi.NodeUnpublishVolumeResponse{}, nil
	}

	if err = ns.Mounter.Unmount(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	if err = os.RemoveAll(targetPath); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	log.DebugLog(ctx, "rbd: successfully unbound volume %s from %s", req.GetVolumeId(), targetPath)

	return &csi.NodeUnpublishVolumeResponse{}, nil
}

// getStagingTargetPath concatenates either NodeStageVolumeRequest's or
// NodeUnstageVolumeRequest's staging target path with the volumeID.
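// For example (illustrative values), a staging_target_path of
// ".../globalmount" and a volume ID of "vol-1" yield ".../globalmount/vol-1".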
func getStagingTargetPath(req interface{}) string {
	switch vr := req.(type) {
	case *csi.NodeStageVolumeRequest:
		return vr.GetStagingTargetPath() + "/" + vr.GetVolumeId()
	case *csi.NodeUnstageVolumeRequest:
		return vr.GetStagingTargetPath() + "/" + vr.GetVolumeId()
	}

	return ""
}

// NodeUnstageVolume unstages the volume from the staging path.
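// Teardown proceeds in the reverse order of staging: unmount the staged
// filesystem, remove the per-volume staging directory, unmap the rbd/nbd
// device, and finally clean up the image metadata stash.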
func (ns *NodeServer) NodeUnstageVolume(
	ctx context.Context,
	req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
	var err error
	if err = util.ValidateNodeUnstageVolumeRequest(req); err != nil {
		return nil, err
	}

	volID := req.GetVolumeId()

	if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
	}
	defer ns.VolumeLocks.Release(volID)

	stagingParentPath := req.GetStagingTargetPath()
	stagingTargetPath := getStagingTargetPath(req)

	notMnt, err := mount.IsNotMountPoint(ns.Mounter, stagingTargetPath)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, status.Error(codes.NotFound, err.Error())
		}
		// Continue on ENOENT errors as we may still have the image mapped
		notMnt = true
	}
	if !notMnt {
		// Unmounting the image
		err = ns.Mounter.Unmount(stagingTargetPath)
		if err != nil {
			log.ExtendedLog(ctx, "failed to unmount targetPath: %s with error: %v", stagingTargetPath, err)

			return nil, status.Error(codes.Internal, err.Error())
		}
		log.DebugLog(ctx, "successfully unmounted volume (%s) from staging path (%s)",
			req.GetVolumeId(), stagingTargetPath)
	}

	if err = os.Remove(stagingTargetPath); err != nil {
		// Any error is critical, as the staging path is expected to be empty by
		// Kubernetes; it otherwise keeps invoking Unstage. Hence any error
		// removing files within this path is a critical error.
		if !os.IsNotExist(err) {
			log.ErrorLog(ctx, "failed to remove staging target path (%s): (%v)", stagingTargetPath, err)

			return nil, status.Error(codes.Internal, err.Error())
		}
	}

	imgInfo, err := lookupRBDImageMetadataStash(stagingParentPath)
	if err != nil {
		log.UsefulLog(ctx, "failed to find image metadata: %v", err)
		// It is an error if it was mounted, as we should have found the image metadata file with
		// no errors
		if !notMnt {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// If not mounted, and the error is anything other than a missing metadata file, it is an error
		if !errors.Is(err, ErrMissingStash) {
			return nil, status.Error(codes.Internal, err.Error())
		}

		// It was not mounted and the image metadata is also missing, we are done
		// as the last step in the staging transaction is complete
		return &csi.NodeUnstageVolumeResponse{}, nil
	}

	// Unmapping rbd device
	imageSpec := imgInfo.String()

	dArgs := detachRBDImageArgs{
		imageOrDeviceSpec: imageSpec,
		isImageSpec:       true,
		isNbd:             imgInfo.NbdAccess,
		encrypted:         imgInfo.Encrypted,
		volumeID:          req.GetVolumeId(),
		unmapOptions:      imgInfo.UnmapOptions,
		logDir:            imgInfo.LogDir,
		logStrategy:       imgInfo.LogStrategy,
	}
	if err = detachRBDImageOrDeviceSpec(ctx, &dArgs); err != nil {
		log.ErrorLog(
			ctx,
			"error unmapping volume (%s) from staging path (%s): (%v)",
			req.GetVolumeId(),
			stagingTargetPath,
			err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	log.DebugLog(ctx, "successfully unmapped volume (%s)", req.GetVolumeId())

	if err = cleanupRBDImageMetadataStash(stagingParentPath); err != nil {
		log.ErrorLog(ctx, "failed to cleanup image metadata stash (%v)", err)

		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.NodeUnstageVolumeResponse{}, nil
}

// NodeExpandVolume resizes rbd volumes.
func (ns *NodeServer) NodeExpandVolume(
	ctx context.Context,
	req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
	volumeID := req.GetVolumeId()
	if volumeID == "" {
		return nil, status.Error(codes.InvalidArgument, "volume ID must be provided")
	}

	// Get the volume path.
	// With Kubernetes version >= v1.19.0, the expand request carries both
	// volume_path and staging_target_path; what CSI requires is
	// staging_target_path.
	volumePath := req.GetStagingTargetPath()
	if volumePath == "" {
		// With Kubernetes version < v1.19.0, volume_path carries the
		// staging_target_path information
		volumePath = req.GetVolumePath()
	}
	if volumePath == "" {
		return nil, status.Error(codes.InvalidArgument, "volume path must be provided")
	}

	if acquired := ns.VolumeLocks.TryAcquire(volumeID); !acquired {
		log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volumeID)

		return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volumeID)
	}
	defer ns.VolumeLocks.Release(volumeID)

	imgInfo, err := lookupRBDImageMetadataStash(volumePath)
	if err != nil {
		log.ErrorLog(ctx, "failed to find image metadata: %v", err)
	}
	devicePath, found := findDeviceMappingImage(
		ctx,
		imgInfo.Pool,
		imgInfo.RadosNamespace,
		imgInfo.ImageName,
		imgInfo.NbdAccess)
	if !found {
		return nil, status.Errorf(codes.Internal,
			"failed to get device for stagingtarget path %v", volumePath)
	}

	mapperFile, mapperPath := util.VolumeMapper(volumeID)
	if imgInfo.Encrypted {
		// The volume is encrypted, resize an active mapping
		err = util.ResizeEncryptedVolume(ctx, mapperFile)
		if err != nil {
			log.ErrorLog(ctx, "failed to resize device %s, mapper %s: %v",
				devicePath, mapperFile, err)

			return nil, status.Errorf(codes.Internal,
				"failed to resize device %s, mapper %s: %v", devicePath, mapperFile, err)
		}
		// Use the mapper device path for the filesystem resize
		devicePath = mapperPath
	}

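	// For filesystem volumes, the resizer detects the filesystem on the device
	// and runs the matching grow tool (e.g. resize2fs for ext*, xfs_growfs for
	// xfs) against the staged mount.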
	if req.GetVolumeCapability().GetBlock() == nil {
		// TODO check size and return success or error
		volumePath += "/" + volumeID
		resizer := mount.NewResizeFs(utilexec.New())
		var ok bool
		ok, err = resizer.Resize(devicePath, volumePath)
		if !ok {
			return nil, status.Errorf(codes.Internal,
				"rbd: resize failed on path %s, error: %v", req.GetVolumePath(), err)
		}
	}

	return &csi.NodeExpandVolumeResponse{}, nil
}

// NodeGetCapabilities returns the supported capabilities of the node server.
func (ns *NodeServer) NodeGetCapabilities(
	ctx context.Context,
	req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
					},
				},
			},
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
					},
				},
			},
			{
				Type: &csi.NodeServiceCapability_Rpc{
					Rpc: &csi.NodeServiceCapability_RPC{
						Type: csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
					},
				},
			},
		},
	}, nil
}

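// processEncryptedDevice checks the encryption state of the rbd image,
// crypt-formats the device when the image is newly prepared for encryption,
// and returns the path of the opened dm-crypt mapping that should be used
// for mounting.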
func (ns *NodeServer) processEncryptedDevice(
	ctx context.Context,
	volOptions *rbdVolume,
	devicePath string) (string, error) {
	imageSpec := volOptions.String()
	encrypted, err := volOptions.checkRbdImageEncrypted(ctx)
	if err != nil {
		log.ErrorLog(ctx, "failed to get encryption status for rbd image %s: %v",
			imageSpec, err)

		return "", err
	}

	switch {
	case encrypted == rbdImageRequiresEncryption:
		// If we get here, it means the image was created with a
		// ceph-csi version that creates a passphrase for the encrypted
		// device in NodeStage. New versions moved that to
		// CreateVolume.
		// Use the same setupEncryption() as CreateVolume does, and
		// continue with the common process to crypt-format the device.
		err = volOptions.setupEncryption(ctx)
		if err != nil {
			log.ErrorLog(ctx, "failed to setup encryption for rbd image %s: %v",
				imageSpec, err)

			return "", err
		}

		// make sure we continue with the encrypting of the device
		fallthrough
	case encrypted == rbdImageEncryptionPrepared:
		diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}
		// TODO: update this when adding support for static (pre-provisioned) PVs
		var existingFormat string
		existingFormat, err = diskMounter.GetDiskFormat(devicePath)
		if err != nil {
			return "", fmt.Errorf("failed to get disk format for path %s: %w", devicePath, err)
		}

		switch existingFormat {
		case "":
			err = volOptions.encryptDevice(ctx, devicePath)
			if err != nil {
				return "", fmt.Errorf("failed to encrypt rbd image %s: %w", imageSpec, err)
			}
		case "crypt", "crypto_LUKS":
			log.WarningLog(ctx, "rbd image %s is encrypted, but encryption state was not updated",
				imageSpec)
			err = volOptions.ensureEncryptionMetadataSet(rbdImageEncrypted)
			if err != nil {
				return "", fmt.Errorf("failed to update encryption state for rbd image %s", imageSpec)
			}
		default:
			return "", fmt.Errorf("can not encrypt rbdImage %s that already has file system: %s",
				imageSpec, existingFormat)
		}
	case encrypted != rbdImageEncrypted:
		return "", fmt.Errorf("rbd image %s found mounted with unexpected encryption status %s",
			imageSpec, encrypted)
	}

	devicePath, err = volOptions.openEncryptedDevice(ctx, devicePath)
	if err != nil {
		return "", err
	}

	return devicePath, nil
}

// xfsSupportsReflink checks if mkfs.xfs supports the "-m reflink=0|1"
// argument. In case it is supported, return true.
func (ns *NodeServer) xfsSupportsReflink() bool {
	// return cached value, if set
	if xfsHasReflink != xfsReflinkUnset {
		return xfsHasReflink == xfsReflinkSupport
	}

	// run mkfs.xfs in the same namespace as formatting would be done in
	// mountVolumeToStagePath()
	diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}
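	// running mkfs.xfs without arguments fails, but prints the usage/help
	// text; probe that output for the reflink option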
	out, err := diskMounter.Exec.Command("mkfs.xfs").CombinedOutput()
	if err != nil {
		// mkfs.xfs should fail with an error message (and help text)
		if strings.Contains(string(out), "reflink=0|1") {
			xfsHasReflink = xfsReflinkSupport

			return true
		}
	}

	xfsHasReflink = xfsReflinkNoSupport

	return false
}

// NodeGetVolumeStats returns volume stats.
func (ns *NodeServer) NodeGetVolumeStats(
	ctx context.Context,
	req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
	var err error
	targetPath := req.GetVolumePath()
	if targetPath == "" {
		err = fmt.Errorf("targetpath %v is empty", targetPath)

		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

	stat, err := os.Stat(targetPath)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "failed to get stat for targetpath %q: %v", targetPath, err)
	}

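	// a directory target belongs to a filesystem-mode volume, a device node
	// to a raw block volume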
	if stat.Mode().IsDir() {
		return csicommon.FilesystemNodeGetVolumeStats(ctx, targetPath)
	} else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
		return blockNodeGetVolumeStats(ctx, targetPath)
	}

	return nil, fmt.Errorf("targetpath %q is not a directory or block device", targetPath)
}

// blockNodeGetVolumeStats gets the metrics for a `volumeMode: Block` type of
// volume. At the moment, only the size of the block-device can be returned, as
// there are no secrets in the NodeGetVolumeStats request that enable us to
// connect to the Ceph cluster.
//
// TODO: https://github.com/container-storage-interface/spec/issues/371#issuecomment-756834471
func blockNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) {
	mp := volume.NewMetricsBlock(targetPath)
	m, err := mp.GetMetrics()
	if err != nil {
		err = fmt.Errorf("failed to get metrics: %w", err)
		log.ErrorLog(ctx, err.Error())

		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.NodeGetVolumeStatsResponse{
		Usage: []*csi.VolumeUsage{
			{
				Total: m.Capacity.Value(),
				Unit:  csi.VolumeUsage_BYTES,
			},
		},
	}, nil
}