ceph-csi/internal/rbd/rbd_healer.go
/*
Copyright 2021 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rbd

import (
	"context"
	"sync"

	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/log"

	"github.com/container-storage-interface/spec/lib/go/csi"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8s "k8s.io/client-go/kubernetes"
)

const (
	// fsTypeBlockName is the FSType value that marks a raw block volume;
	// it is used below to choose the Block vs. Mount access type.
	fsTypeBlockName = "block"
)

// accessModeStrToInt converts a Kubernetes access mode string to the
// corresponding CSI access mode value.
// Make sure to update this function as and when new modes are introduced.
func accessModeStrToInt(mode v1.PersistentVolumeAccessMode) csi.VolumeCapability_AccessMode_Mode {
	switch mode {
	case v1.ReadWriteOnce:
		return csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER
	case v1.ReadOnlyMany:
		return csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY
	case v1.ReadWriteMany:
		return csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER
	case v1.ReadWriteOncePod:
		return csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER
	}

	return csi.VolumeCapability_AccessMode_UNKNOWN
}

// getSecret fetches the secret with the given name from the namespace and
// returns its data as a map of strings.
func getSecret(c *k8s.Clientset, ns, name string) (map[string]string, error) {
	deviceSecret := make(map[string]string)

	secret, err := c.CoreV1().Secrets(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		log.ErrorLogMsg("get secret failed, err: %v", err)

		return nil, err
	}

	for k, v := range secret.Data {
		deviceSecret[k] = string(v)
	}

	return deviceSecret, nil
}

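// callNodeStageVolume rebuilds a NodeStageVolume request from the PV details
// and replays it against the local NodeServer, so that the staging of the
// volume is re-established.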
func callNodeStageVolume(ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolume, stagingPath string) error {
	publishContext := make(map[string]string)

	volID := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle
	stagingParentPath := stagingPath + pv.Name + "/globalmount"

	log.DefaultLog("sending nodeStageVolume for volID: %s, stagingPath: %s",
		volID, stagingParentPath)

	deviceSecret, err := getSecret(c,
		pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Namespace,
		pv.Spec.PersistentVolumeSource.CSI.NodeStageSecretRef.Name)
	if err != nil {
		log.ErrorLogMsg("getSecret failed for volID: %s, err: %v", volID, err)

		return err
	}

	volumeContext := pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes
	volumeContext["volumeHealerContext"] = "true"

	req := &csi.NodeStageVolumeRequest{
		VolumeId:          volID,
		PublishContext:    publishContext,
		StagingTargetPath: stagingParentPath,
		VolumeCapability: &csi.VolumeCapability{
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: accessModeStrToInt(pv.Spec.AccessModes[0]),
			},
		},
		Secrets:       deviceSecret,
		VolumeContext: volumeContext,
	}
	if pv.Spec.PersistentVolumeSource.CSI.FSType == fsTypeBlockName {
		req.VolumeCapability.AccessType = &csi.VolumeCapability_Block{
			Block: &csi.VolumeCapability_BlockVolume{},
		}
	} else {
		req.VolumeCapability.AccessType = &csi.VolumeCapability_Mount{
			Mount: &csi.VolumeCapability_MountVolume{
				FsType:     pv.Spec.PersistentVolumeSource.CSI.FSType,
				MountFlags: pv.Spec.MountOptions,
			},
		}
	}

	_, err = ns.NodeStageVolume(context.TODO(), req)
	if err != nil {
		log.ErrorLogMsg("nodeStageVolume request failed, volID: %s, stagingPath: %s, err: %v",
			volID, stagingParentPath, err)

		return err
	}

	return nil
}

// runVolumeHealer heals the rbd-nbd backed volumes attached to this node.
func runVolumeHealer(ns *NodeServer, conf *util.Config) error {
	c := util.NewK8sClient()
	val, err := c.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.ErrorLogMsg("list volumeAttachments failed, err: %v", err)

		return err
	}

	var wg sync.WaitGroup
	channel := make(chan error)
	for i := range val.Items {
		// skip if the volumeattachment doesn't belong to the current node or driver
		if val.Items[i].Spec.NodeName != conf.NodeID || val.Items[i].Spec.Attacher != conf.DriverName {
			continue
		}
		pvName := *val.Items[i].Spec.Source.PersistentVolumeName
		pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
		if err != nil {
			// skip if the volume doesn't exist
			if !apierrors.IsNotFound(err) {
				log.ErrorLogMsg("get persistentVolumes failed for pv: %s, err: %v", pvName, err)
			}

			continue
		}
		// TODO: check the pv delete annotations, e.g.: what happens when the pv is marked for delete
		// skip this volumeattachment if its PV is not bound
		if pv.Status.Phase != v1.VolumeBound {
			continue
		}
		// skip if the mounter is not rbd-nbd
		if pv.Spec.PersistentVolumeSource.CSI.VolumeAttributes["mounter"] != "rbd-nbd" {
			continue
		}

		// ensure that the volume is still in the attached state
		va, err := c.StorageV1().VolumeAttachments().Get(context.TODO(), val.Items[i].Name, metav1.GetOptions{})
		if err != nil {
			// skip if the volumeattachment doesn't exist
			if !apierrors.IsNotFound(err) {
				log.ErrorLogMsg("get volumeAttachments failed for volumeAttachment: %s, volID: %s, err: %v",
					val.Items[i].Name, pv.Spec.PersistentVolumeSource.CSI.VolumeHandle, err)
			}

			continue
		}
		if !va.Status.Attached {
			continue
		}

		wg.Add(1)
		// run multiple NodeStageVolume calls concurrently
		go func(wg *sync.WaitGroup, ns *NodeServer, c *k8s.Clientset, pv *v1.PersistentVolume, stagingPath string) {
			defer wg.Done()
			channel <- callNodeStageVolume(ns, c, pv, stagingPath)
		}(&wg, ns, c, pv, conf.StagingPath)
	}

	// close the channel once all the workers are done, so that the range
	// below terminates
	go func() {
		wg.Wait()
		close(channel)
	}()
	for s := range channel {
		if s != nil {
			log.ErrorLogMsg("callNodeStageVolume failed, err: %v", s)
		}
	}

	return nil
}

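// A minimal sketch of how runVolumeHealer could be wired up at nodeplugin
// startup (hypothetical call site; the actual wiring lives in the driver
// initialization, and ns/conf here are assumed to be the node server and
// config built there):
//
//	go func() {
//		if err := runVolumeHealer(ns, conf); err != nil {
//			log.ErrorLogMsg("healer had failures, err: %v", err)
//		}
//	}()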