mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-11-18 04:10:22 +00:00
b6a88dd728
Problem: ------- For rbd nbd userspace mounter backends, after a restart of the nodeplugin all the mounts will start seeing IO errors. This is because, for rbd-nbd backends there will be a userspace mount daemon running per volume, post restart of the nodeplugin pod, there is no way to restore the daemons back to life. Solution: -------- The volume healer is a one-time activity that is triggered at the startup time of the rbd nodeplugin. It navigates through the list of volume attachments on the node and acts accordingly. For now, it is limited to nbd type storage only, but it is flexible and can be extended in the future for other backend types as needed. From a few feet above: This solves a severe problem for nbd backed csi volumes. The healer, while going through the list of volume attachments on the node, if it finds a volume in attached state and of type nbd, will attempt to fix the rbd-nbd volumes by sending a NodeStageVolume request with the required volume attributes like secrets, device name, image attributes, etc., which will finally help start the required rbd-nbd daemons in the nodeplugin csi-rbdplugin container. This will allow reattaching the backend images with the right nbd device, thus allowing the applications to perform IO without any interruptions even after a nodeplugin restart. Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
177 lines
5.5 KiB
Go
177 lines
5.5 KiB
Go
/*
|
|
Copyright 2021 The Ceph-CSI Authors.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
*/
|
|
|
|
package rbd
|
|
|
|
import (
	"context"
	"fmt"

	"github.com/ceph/ceph-csi/internal/util"

	"github.com/container-storage-interface/spec/lib/go/csi"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8s "k8s.io/client-go/kubernetes"
)
|
|
|
|
const (
|
|
fsTypeBlockName = "block"
|
|
)
|
|
|
|
// accessModeStrToInt convert access mode type string to int32.
|
|
// Makesure to update this function as and when there are new modes introduced.
|
|
func accessModeStrToInt(mode v1.PersistentVolumeAccessMode) csi.VolumeCapability_AccessMode_Mode {
|
|
switch mode {
|
|
case v1.ReadWriteOnce:
|
|
return csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
|
|
case v1.ReadOnlyMany:
|
|
return csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
|
|
case v1.ReadWriteMany:
|
|
return csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
|
|
}
|
|
return csi.VolumeCapability_AccessMode_UNKNOWN
|
|
}
|
|
|
|
// getSecret get the secret details by name.
|
|
func getSecret(c *k8s.Clientset, ns, name string) (map[string]string, error) {
|
|
deviceSecret := make(map[string]string)
|
|
|
|
secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
|
|
if err != nil {
|
|
util.ErrorLogMsg("get secret failed, err: %v", err)
|
|
return nil, err
|
|
}
|
|
|
|
for k, v := range secret.Data {
|
|
deviceSecret[k] = string(v)
|
|
}
|
|
|
|
return deviceSecret, nil
|
|
}
|
|
|
|
func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolume, stagingPath string) error {
|
|
publishContext := make(map[string]string)
|
|
|
|
volID := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle
|
|
stagingParentPath := stagingPath + pv.Name + "/globalmount"
|
|
|
|
util.DefaultLog("sending nodeStageVolume for volID: %s, stagingPath: %s",
|
|
volID, stagingParentPath)
|
|
|
|
deviceSecret, err := getSecret(c,
|
|
pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Namespace,
|
|
pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Name)
|
|
if err != nil {
|
|
util.ErrorLogMsg("getSecret failed for volID: %s, err: %v", volID, err)
|
|
return err
|
|
}
|
|
|
|
volumeContext := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes
|
|
volumeContext["volumeHealerContext"] = "true"
|
|
|
|
req := &csi.NodeStageVolumeRequest{
|
|
VolumeId: volID,
|
|
PublishContext: publishContext,
|
|
StagingTargetPath: stagingParentPath,
|
|
VolumeCapability: &csi.VolumeCapability{
|
|
AccessMode: &csi.VolumeCapability_AccessMode{
|
|
Mode: accessModeStrToInt(pv.Spec.AccessModes[0]),
|
|
},
|
|
},
|
|
Secrets: deviceSecret,
|
|
VolumeContext: volumeContext,
|
|
}
|
|
if pv.Spec.PersistentVolumeSource.CSI.FSType == fsTypeBlockName {
|
|
req.VolumeCapability.AccessType = &csi.VolumeCapability_Block{
|
|
Block: &csi.VolumeCapability_BlockVolume{},
|
|
}
|
|
} else {
|
|
req.VolumeCapability.AccessType = &csi.VolumeCapability_Mount{
|
|
Mount: &csi.VolumeCapability_MountVolume{
|
|
FsType: pv.Spec.PersistentVolumeSource.CSI.FSType,
|
|
MountFlags: pv.Spec.MountOptions,
|
|
},
|
|
}
|
|
}
|
|
|
|
_, err = ns.NodeStageVolume(context.TODO(), req)
|
|
if err != nil {
|
|
util.ErrorLogMsg("nodeStageVolume request failed, volID: %s, stagingPath: %s, err: %v",
|
|
volID, stagingParentPath, err)
|
|
return err
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// runVolumeHealer heal the volumes attached on a node.
|
|
func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
|
|
c := util.NewK8sClient()
|
|
val, err := c.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
|
|
if err != nil {
|
|
util.ErrorLogMsg("list volumeAttachments failed, err: %v", err)
|
|
return err
|
|
}
|
|
|
|
for i := range val.Items {
|
|
// skip if the volumeattachments doesn't belong to current node or driver
|
|
if val.Items[i].Spec.NodeName != conf.NodeID || val.Items[i].Spec.Attacher != conf.DriverName {
|
|
continue
|
|
}
|
|
pvName := *val.Items[i].Spec.Source.PersistentVolumeName
|
|
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
|
|
if err != nil {
|
|
// skip if volume doesn't exist
|
|
if !apierrors.IsNotFound(err) {
|
|
util.ErrorLogMsg("get persistentVolumes failed for pv: %s, err: %v", pvName, err)
|
|
}
|
|
continue
|
|
}
|
|
// TODO: check with pv delete annotations, for eg: what happens when the pv is marked for delete
|
|
// skip this volumeattachment if its pv is not bound
|
|
if pv.Status.Phase != v1.VolumeBound {
|
|
continue
|
|
}
|
|
// skip if mounter is not rbd-nbd
|
|
if pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes["mounter"] != "rbd-nbd" {
|
|
continue
|
|
}
|
|
|
|
// ensure that the volume is still in attached state
|
|
va, err := c.StorageV1().VolumeAttachments().Get(context.TODO(), val.Items[i].Name, metav1.GetOptions{})
|
|
if err != nil {
|
|
// skip if volume attachment doesn't exist
|
|
if !apierrors.IsNotFound(err) {
|
|
util.ErrorLogMsg("get volumeAttachments failed for volumeAttachment: %s, volID: %s, err: %v",
|
|
val.Items[i].Name, pv.Spec.PersistentVolumeSource.CSI.VolumeHandle, err)
|
|
}
|
|
continue
|
|
}
|
|
if !va.Status.Attached {
|
|
continue
|
|
}
|
|
|
|
err = callNodeStageVolume(ns, c, pv, conf.StagingPath)
|
|
if err != nil {
|
|
util.ErrorLogMsg("callNodeStageVolume failed for VolID: %s, err: %v",
|
|
pv.Spec.PersistentVolumeSource.CSI.VolumeHandle, err)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|