/*
Copyright 2018 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/ceph/ceph-csi/internal/util"

	"k8s.io/apimachinery/pkg/util/wait"
)

const (
	rbdTonbd  = "rbd-nbd"
	moduleNbd = "nbd"

	accessTypeKRbd = "krbd"
	accessTypeNbd  = "nbd"

	rbd = "rbd"

	// Output strings returned during invocation of "rbd unmap --device-type... <imageSpec>" when
	// image is not found to be mapped. Used to ignore errors when attempting to unmap such images.
	// The %s format specifier should contain the <imageSpec> string.
	// NOTE: When using devicePath instead of imageSpec, the error strings are different.
	rbdUnmapCmdkRbdMissingMap = "rbd: %s: not a mapped image or snapshot"
	rbdUnmapCmdNbdMissingMap  = "rbd-nbd: %s is not mapped"
	rbdMapConnectionTimeout   = "Connection timed out"

	defaultNbdReAttachTimeout = 300

	useNbdNetlink  = "try-netlink"
	setNbdReattach = "reattach-timeout"
)

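// hasNBD records whether the rbd-nbd tools are available on the node; it is
// populated once by init().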
var hasNBD = false

func init() {
	hasNBD = checkRbdNbdTools()
}

// rbdDeviceInfo strongly typed JSON spec for rbd device list output (of type krbd).
type rbdDeviceInfo struct {
	ID             string `json:"id"`
	Pool           string `json:"pool"`
	RadosNamespace string `json:"namespace"`
	Name           string `json:"name"`
	Device         string `json:"device"`
}

// nbdDeviceInfo strongly typed JSON spec for rbd-nbd device list output (of type nbd)
// NOTE: There is a bug in rbd output that returns id as number for nbd, and string for krbd, thus
// requiring 2 different JSON structures to unmarshal the output.
// NOTE: image key is "name" in krbd output and "image" in nbd output, which is another difference.
type nbdDeviceInfo struct {
	ID             int64  `json:"id"`
	Pool           string `json:"pool"`
	RadosNamespace string `json:"namespace"`
	Name           string `json:"image"`
	Device         string `json:"device"`
}

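// detachRBDImageArgs collects the parameters that detachRBDImageOrDeviceSpec
// needs to unmap an rbd image or device.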
type detachRBDImageArgs struct {
	imageOrDeviceSpec string
	isImageSpec       bool
	isNbd             bool
	encrypted         bool
	volumeID          string
	unmapOptions      string
	logDir            string
}

// rbdGetDeviceList queries rbd about mapped devices and returns a list of rbdDeviceInfo.
// It will selectively list devices mapped using krbd or nbd as specified by accessType.
func rbdGetDeviceList(ctx context.Context, accessType string) ([]rbdDeviceInfo, error) {
	// rbd device list --format json --device-type [krbd|nbd]
	var (
		rbdDeviceList []rbdDeviceInfo
		nbdDeviceList []nbdDeviceInfo
	)

	stdout, _, err := util.ExecCommand(ctx, rbd, "device", "list", "--format=json", "--device-type", accessType)
	if err != nil {
		return nil, fmt.Errorf("error getting device list from rbd for devices of type (%s): %w", accessType, err)
	}

	if accessType == accessTypeKRbd {
		err = json.Unmarshal([]byte(stdout), &rbdDeviceList)
	} else {
		err = json.Unmarshal([]byte(stdout), &nbdDeviceList)
	}
	if err != nil {
		return nil, fmt.Errorf(
			"failed to parse JSON output of device list for devices of type (%s): %w",
			accessType,
			err)
	}

	// convert output to a rbdDeviceInfo list for consumers
	if accessType == accessTypeNbd {
		for _, device := range nbdDeviceList {
			rbdDeviceList = append(
				rbdDeviceList,
				rbdDeviceInfo{
					ID:             strconv.FormatInt(device.ID, 10),
					Pool:           device.Pool,
					RadosNamespace: device.RadosNamespace,
					Name:           device.Name,
					Device:         device.Device,
				})
		}
	}

	return rbdDeviceList, nil
}

// findDeviceMappingImage finds a devicePath, if available, based on image spec (pool/{namespace/}image) on the node.
func findDeviceMappingImage(ctx context.Context, pool, namespace, image string, useNbdDriver bool) (string, bool) {
	accessType := accessTypeKRbd
	if useNbdDriver {
		accessType = accessTypeNbd
	}

	imageSpec := fmt.Sprintf("%s/%s", pool, image)
	if namespace != "" {
		imageSpec = fmt.Sprintf("%s/%s/%s", pool, namespace, image)
	}

	rbdDeviceList, err := rbdGetDeviceList(ctx, accessType)
	if err != nil {
		util.WarningLog(ctx, "failed to determine if image (%s) is mapped to a device (%v)", imageSpec, err)

		return "", false
	}

	for _, device := range rbdDeviceList {
		if device.Name == image && device.Pool == pool && device.RadosNamespace == namespace {
			return device.Device, true
		}
	}

	return "", false
}

// waitForPath polls for the image to show up in the mapped device listing,
// retrying up to maxRetries times.
func waitForPath(ctx context.Context, pool, namespace, image string, maxRetries int, useNbdDriver bool) (string, bool) {
	for i := 0; i < maxRetries; i++ {
		if i != 0 {
			time.Sleep(time.Second)
		}

		device, found := findDeviceMappingImage(ctx, pool, namespace, image, useNbdDriver)
		if found {
			return device, found
		}
	}

	return "", false
}

// Check if rbd-nbd tools are installed.
func checkRbdNbdTools() bool {
	// check if the module is loaded or compiled in
	_, err := os.Stat(fmt.Sprintf("/sys/module/%s", moduleNbd))
	if os.IsNotExist(err) {
		// try to load the module
		_, _, err = util.ExecCommand(context.TODO(), "modprobe", moduleNbd)
		if err != nil {
			util.ExtendedLogMsg("rbd-nbd: nbd modprobe failed with error %v", err)

			return false
		}
	}
	if _, _, err := util.ExecCommand(context.TODO(), rbdTonbd, "--version"); err != nil {
		util.ExtendedLogMsg("rbd-nbd: running rbd-nbd --version failed with error %v", err)

		return false
	}
	util.ExtendedLogMsg("rbd-nbd tools were found.")

	return true
}

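// attachRBDImage checks whether the image is already mapped on the node; if
// it is not, it waits for the image to be free of other watchers and then
// maps it, returning the resulting device path.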
func attachRBDImage(ctx context.Context, volOptions *rbdVolume, device string, cr *util.Credentials) (string, error) {
	var err error

	image := volOptions.RbdImageName
	useNBD := false
	if volOptions.Mounter == rbdTonbd && hasNBD {
		useNBD = true
	}

	devicePath, found := waitForPath(ctx, volOptions.Pool, volOptions.RadosNamespace, image, 1, useNBD)
	if !found {
		backoff := wait.Backoff{
			Duration: rbdImageWatcherInitDelay,
			Factor:   rbdImageWatcherFactor,
			Steps:    rbdImageWatcherSteps,
		}

		err = waitForrbdImage(ctx, backoff, volOptions)
		if err != nil {
			return "", err
		}
		devicePath, err = createPath(ctx, volOptions, device, cr)
	}

	return devicePath, err
}

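// appendDeviceTypeAndOptions adds the --device-type (krbd or nbd) and the
// default and user supplied --options to the rbd map/unmap arguments.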
func appendDeviceTypeAndOptions(cmdArgs []string, isNbd, isThick bool, userOptions string) []string {
	accessType := accessTypeKRbd
	if isNbd {
		accessType = accessTypeNbd
	}

	cmdArgs = append(cmdArgs, "--device-type", accessType)
	if !isNbd {
		// Enable mapping and unmapping images from a non-initial network
		// namespace (e.g. for Multus CNI). The network namespace must be
		// owned by the initial user namespace.
		cmdArgs = append(cmdArgs, "--options", "noudev")
	} else {
		if !strings.Contains(userOptions, useNbdNetlink) {
			cmdArgs = append(cmdArgs, "--options", useNbdNetlink)
		}
		if !strings.Contains(userOptions, setNbdReattach) {
			cmdArgs = append(cmdArgs, "--options", fmt.Sprintf("%s=%d", setNbdReattach, defaultNbdReAttachTimeout))
		}
	}
	if isThick {
		// When an image is thick-provisioned, any discard/unmap/trim
		// requests should not free extents.
		cmdArgs = append(cmdArgs, "--options", "notrim")
	}
	if userOptions != "" {
		// userOptions is appended after, possibly overriding the above
		// default options.
		cmdArgs = append(cmdArgs, "--options", userOptions)
	}

	return cmdArgs
}

// appendRbdNbdCliOptions appends the mandatory options and converts the list
// of user options provided for the rbd integrated CLI into the rbd-nbd CLI
// specific format.
func appendRbdNbdCliOptions(cmdArgs []string, userOptions string) []string {
	if !strings.Contains(userOptions, useNbdNetlink) {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--%s", useNbdNetlink))
	}
	if !strings.Contains(userOptions, setNbdReattach) {
		cmdArgs = append(cmdArgs, fmt.Sprintf("--%s=%d", setNbdReattach, defaultNbdReAttachTimeout))
	}
	if userOptions != "" {
		options := strings.Split(userOptions, ",")
		for _, opt := range options {
			cmdArgs = append(cmdArgs, fmt.Sprintf("--%s", opt))
		}
	}

	return cmdArgs
}

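// createPath maps the rbd image and returns the device path. When a device is
// passed in, the image is attached to that existing device with rbd-nbd
// instead of being freshly mapped. On a connection timeout the image is
// unmapped again before the error is returned.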
func createPath(ctx context.Context, volOpt *rbdVolume, device string, cr *util.Credentials) (string, error) {
	isNbd := false
	imagePath := volOpt.String()

	util.TraceLog(ctx, "rbd: map mon %s", volOpt.Monitors)

	mapArgs := []string{
		"--id", cr.ID,
		"-m", volOpt.Monitors,
		"--keyfile=" + cr.KeyFile,
	}

	// Choose access protocol
	if volOpt.Mounter == rbdTonbd && hasNBD {
		isNbd = true
	}

	// check if the image should stay thick-provisioned
	isThick, err := volOpt.isThickProvisioned()
	if err != nil {
		util.WarningLog(ctx, "failed to detect if image %q is thick-provisioned: %v", volOpt, err)
	}

	if isNbd {
		mapArgs = append(mapArgs, "--log-file",
			getCephClientLogFileName(volOpt.VolID, volOpt.LogDir, "rbd-nbd"))
	}

	cli := rbd
	if device != "" {
		// TODO: use rbd cli for attach/detach in the future
		cli = rbdNbdMounter
		mapArgs = append(mapArgs, "attach", imagePath, "--device", device)
		mapArgs = appendRbdNbdCliOptions(mapArgs, volOpt.MapOptions)
	} else {
		mapArgs = append(mapArgs, "map", imagePath)
		mapArgs = appendDeviceTypeAndOptions(mapArgs, isNbd, isThick, volOpt.MapOptions)
	}

	if volOpt.readOnly {
		mapArgs = append(mapArgs, "--read-only")
	}

	// Execute map
	stdout, stderr, err := util.ExecCommand(ctx, cli, mapArgs...)
	if err != nil {
		util.WarningLog(ctx, "rbd: map error %v, rbd output: %s", err, stderr)
		// unmap rbd image if connection timeout
		if strings.Contains(err.Error(), rbdMapConnectionTimeout) {
			dArgs := detachRBDImageArgs{
				imageOrDeviceSpec: imagePath,
				isImageSpec:       true,
				isNbd:             isNbd,
				encrypted:         volOpt.isEncrypted(),
				volumeID:          volOpt.VolID,
				unmapOptions:      volOpt.UnmapOptions,
				logDir:            volOpt.LogDir,
			}
			detErr := detachRBDImageOrDeviceSpec(ctx, dArgs)
			if detErr != nil {
				util.WarningLog(ctx, "rbd: %s unmap error %v", imagePath, detErr)
			}
		}

		return "", fmt.Errorf("rbd: map failed with error %w, rbd error output: %s", err, stderr)
	}
	devicePath := strings.TrimSuffix(stdout, "\n")

	return devicePath, nil
}

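// waitForrbdImage waits, with an exponential backoff, until the rbd image is
// no longer reported as in use, unless in-use checks are disabled for the
// volume.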
func waitForrbdImage(ctx context.Context, backoff wait.Backoff, volOptions *rbdVolume) error {
	imagePath := volOptions.String()

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		used, err := volOptions.isInUse()
		if err != nil {
			return false, fmt.Errorf("failed to check rbd image status: (%w)", err)
		}
		if (volOptions.DisableInUseChecks) && (used) {
			util.UsefulLog(ctx, "valid multi-node attach requested, ignoring watcher in-use result")

			return used, nil
		}

		return !used, nil
	})
	// return an error if the rbd image has not become available within the specified timeout
	if errors.Is(err, wait.ErrWaitTimeout) {
		return fmt.Errorf("rbd image %s is still being used", imagePath)
	}
	// return any other error encountered while waiting for the image to become available
	return err
}

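// detachRBDDevice unmaps the rbd device at devicePath, detecting from the
// path whether it was mapped with krbd or rbd-nbd.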
func detachRBDDevice(ctx context.Context, devicePath, volumeID, unmapOptions string, encrypted bool) error {
	nbdType := false
	if strings.HasPrefix(devicePath, "/dev/nbd") {
		nbdType = true
	}

	dArgs := detachRBDImageArgs{
		imageOrDeviceSpec: devicePath,
		isImageSpec:       false,
		isNbd:             nbdType,
		encrypted:         encrypted,
		volumeID:          volumeID,
		unmapOptions:      unmapOptions,
	}

	return detachRBDImageOrDeviceSpec(ctx, dArgs)
}

// detachRBDImageOrDeviceSpec detaches an rbd imageSpec or devicePath, with additional checking
// when imageSpec is used to decide if image is already unmapped.
func detachRBDImageOrDeviceSpec(
	ctx context.Context,
	dArgs detachRBDImageArgs) error {
	if dArgs.encrypted {
		mapperFile, mapperPath := util.VolumeMapper(dArgs.volumeID)
		mappedDevice, mapper, err := util.DeviceEncryptionStatus(ctx, mapperPath)
		if err != nil {
			util.ErrorLog(ctx, "error determining LUKS device on %s, %s: %s",
				mapperPath, dArgs.imageOrDeviceSpec, err)

			return err
		}
		if len(mapper) > 0 {
			// mapper found, so it is an open LUKS device
			err = util.CloseEncryptedVolume(ctx, mapperFile)
			if err != nil {
				util.ErrorLog(ctx, "error closing LUKS device on %s, %s: %s",
					mapperPath, dArgs.imageOrDeviceSpec, err)

				return err
			}
			dArgs.imageOrDeviceSpec = mappedDevice
		}
	}

	unmapArgs := []string{"unmap", dArgs.imageOrDeviceSpec}
	unmapArgs = appendDeviceTypeAndOptions(unmapArgs, dArgs.isNbd, false, dArgs.unmapOptions)

	_, stderr, err := util.ExecCommand(ctx, rbd, unmapArgs...)
	if err != nil {
		// Messages for krbd and nbd differ, hence checking either of them for missing mapping
		// This is not applicable when a device path is passed in
		if dArgs.isImageSpec &&
			(strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdkRbdMissingMap, dArgs.imageOrDeviceSpec)) ||
				strings.Contains(stderr, fmt.Sprintf(rbdUnmapCmdNbdMissingMap, dArgs.imageOrDeviceSpec))) {
			// Devices found not to be mapped are treated as a successful detach
			util.TraceLog(ctx, "image or device spec (%s) not mapped", dArgs.imageOrDeviceSpec)

			return nil
		}

		return fmt.Errorf("rbd: unmap for spec (%s) failed (%w): (%s)", dArgs.imageOrDeviceSpec, err, stderr)
	}
	if dArgs.isNbd && dArgs.logDir != "" {
		logFile := getCephClientLogFileName(dArgs.volumeID, dArgs.logDir, "rbd-nbd")
		if err = os.Remove(logFile); err != nil {
			util.WarningLog(ctx, "failed to remove logfile: %s, error: %v",
				logFile, err)
		}
	}

	return nil
}