/*
Copyright 2020 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"context"
	"errors"
	"fmt"
	"strconv"

	ctrl "github.com/ceph/ceph-csi/internal/controller"
	"github.com/ceph/ceph-csi/internal/rbd"
	"github.com/ceph/ceph-csi/internal/util"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// ReconcilePersistentVolume reconciles a PersistentVolume object.
type ReconcilePersistentVolume struct {
	client client.Client
	config ctrl.Config
	// Locks serializes reconciles that act on the same volume handle.
	Locks *util.VolumeLocks
}

// Compile-time assertions that ReconcilePersistentVolume implements the
// required interfaces.
var _ reconcile.Reconciler = &ReconcilePersistentVolume{}
var _ ctrl.ContollerManager = &ReconcilePersistentVolume{}

// Init will add the ReconcilePersistentVolume to the list.
func Init() {
	// add ReconcilePersistentVolume to the list
	ctrl.ControllerList = append(ctrl.ControllerList, ReconcilePersistentVolume{})
}
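
// A minimal wiring sketch (illustrative, not part of this package): the
// driver binary calls Init() during startup and later walks
// ctrl.ControllerList to attach every registered controller to a
// controller-runtime manager, where "mgr" and "config" are assumed to be
// set up by the caller:
//
//	persistentvolume.Init()
//	for _, c := range ctrl.ControllerList {
//		if err := c.Add(mgr, config); err != nil {
//			// handle the error
//		}
//	}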

// Add adds the newPVReconciler.
func (r ReconcilePersistentVolume) Add(mgr manager.Manager, config ctrl.Config) error {
	return add(mgr, newPVReconciler(mgr, config))
}

// newPVReconciler returns a ReconcilePersistentVolume.
func newPVReconciler(mgr manager.Manager, config ctrl.Config) reconcile.Reconciler {
	r := &ReconcilePersistentVolume{
		client: mgr.GetClient(),
		config: config,
		Locks:  util.NewVolumeLocks(),
	}

	return r
}

func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller; MaxConcurrentReconciles is 1, so PV events
	// are reconciled one at a time.
	c, err := controller.New(
		"persistentvolume-controller",
		mgr,
		controller.Options{MaxConcurrentReconciles: 1, Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to PersistentVolumes
	err = c.Watch(&source.Kind{Type: &corev1.PersistentVolume{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return fmt.Errorf("failed to watch the changes: %w", err)
	}

	return nil
}

// getCredentials fetches the named secret from the given namespace and
// builds util.Credentials from its data.
func (r *ReconcilePersistentVolume) getCredentials(name, namespace string) (*util.Credentials, error) {
	var cr *util.Credentials

	if name == "" || namespace == "" {
		errStr := "secret name or secret namespace is empty"
		util.ErrorLogMsg(errStr)

		return nil, errors.New(errStr)
	}
	secret := &corev1.Secret{}
	err := r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, secret)
	if err != nil {
		return nil, fmt.Errorf("error getting secret %s in namespace %s: %w", name, namespace, err)
	}

	credentials := map[string]string{}
	for key, value := range secret.Data {
		credentials[key] = string(value)
	}

	cr, err = util.NewUserCredentials(credentials)
	if err != nil {
		util.ErrorLogMsg("failed to get user credentials %s", err)

		return nil, err
	}

	return cr, nil
}
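
// The referenced secret is expected to carry Ceph user credentials, e.g.
// (an illustrative manifest; key names as commonly used by ceph-csi RBD
// secrets, values base64-encoded as in any Kubernetes Secret):
//
//	data:
//	  userID: <base64 ceph user>
//	  userKey: <base64 ceph key>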

// checkStaticVolume returns true when the PV was provisioned statically,
// based on the "staticVolume" volume attribute in its CSI spec.
func checkStaticVolume(pv *corev1.PersistentVolume) (bool, error) {
	static := false
	var err error

	staticVol := pv.Spec.CSI.VolumeAttributes["staticVolume"]
	if staticVol != "" {
		static, err = strconv.ParseBool(staticVol)
		if err != nil {
			return false, fmt.Errorf("failed to parse staticVolume: %w", err)
		}
	}

	return static, nil
}
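
// A statically provisioned PV carries the attribute in its CSI spec, e.g.
// (illustrative):
//
//	csi:
//	  driver: rbd.csi.ceph.com
//	  volumeAttributes:
//	    staticVolume: "true"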

// storeVolumeIDInPV stores the new volumeID in the PV object.
//
// With async DR, the volumeID is not the same on the secondary cluster when
// the clusterID or the poolID differs. Earlier, the new volumeID mapping was
// expected to be stored in the rados omap pool; but a ControllerExpand or
// DeleteVolume request carries only the volumeID, so the corresponding
// poolID cannot be looked up on the new cluster. Instead, the
// csi-rbdplugin-controller watches PV objects: when a PV is created, it
// checks whether the omap already exists; if it does not, it generates the
// new volumeID and, when no mapping entry exists yet, records it in a PV
// annotation. When cephcsi finds the omap missing but the annotation mapping
// present, it uses the new volumeID for further operations.
func (r ReconcilePersistentVolume) storeVolumeIDInPV(pv *corev1.PersistentVolume, newVolumeID string) error {
	if v, ok := pv.Annotations[rbd.PVVolumeHandleAnnotationKey]; ok {
		if v == newVolumeID {
			return nil
		}
	}
	if pv.Annotations == nil {
		pv.Annotations = make(map[string]string)
	}
	if pv.Labels == nil {
		pv.Labels = make(map[string]string)
	}
	pv.Labels[rbd.PVReplicatedLabelKey] = rbd.PVReplicatedLabelValue
	pv.Annotations[rbd.PVVolumeHandleAnnotationKey] = newVolumeID

	return r.client.Update(context.TODO(), pv)
}
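
// After a successful update the PV metadata carries the mapping, e.g.
// (illustrative shape; the actual key strings are defined in the rbd
// package):
//
//	metadata:
//	  annotations:
//	    <rbd.PVVolumeHandleAnnotationKey>: <newVolumeID>
//	  labels:
//	    <rbd.PVReplicatedLabelKey>: <rbd.PVReplicatedLabelValue>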

// reconcilePV will extract the image details from the pv spec and regenerates
// the omap data.
func (r ReconcilePersistentVolume) reconcilePV(obj runtime.Object) error {
	pv, ok := obj.(*corev1.PersistentVolume)
	if !ok {
		return nil
	}
	if pv.Spec.CSI == nil || pv.Spec.CSI.Driver != r.config.DriverName {
		return nil
	}
	pool := pv.Spec.CSI.VolumeAttributes["pool"]
	journalPool := pv.Spec.CSI.VolumeAttributes["journalPool"]
	requestName := pv.Name
	imageName := pv.Spec.CSI.VolumeAttributes["imageName"]
	volumeHandler := pv.Spec.CSI.VolumeHandle
	secretName := ""
	secretNamespace := ""
	// check static volume
	static, err := checkStaticVolume(pv)
	if err != nil {
		return err
	}
	// if the volume is static, don't generate OMAP data
	if static {
		return nil
	}
	if pv.Spec.CSI.ControllerExpandSecretRef != nil {
		secretName = pv.Spec.CSI.ControllerExpandSecretRef.Name
		secretNamespace = pv.Spec.CSI.ControllerExpandSecretRef.Namespace
	} else if pv.Spec.CSI.NodeStageSecretRef != nil {
		secretName = pv.Spec.CSI.NodeStageSecretRef.Name
		secretNamespace = pv.Spec.CSI.NodeStageSecretRef.Namespace
	}

	// Take lock to process only one volumeHandle at a time.
	if ok := r.Locks.TryAcquire(pv.Spec.CSI.VolumeHandle); !ok {
		return fmt.Errorf(util.VolumeOperationAlreadyExistsFmt, pv.Spec.CSI.VolumeHandle)
	}
	defer r.Locks.Release(pv.Spec.CSI.VolumeHandle)

	cr, err := r.getCredentials(secretName, secretNamespace)
	if err != nil {
		util.ErrorLogMsg("failed to get credentials from secret %s", err)

		return err
	}
	defer cr.DeleteCredentials()

	rbdVolID, err := rbd.RegenerateJournal(imageName, volumeHandler, pool, journalPool, requestName, cr)
	if err != nil {
		util.ErrorLogMsg("failed to regenerate journal %s", err)

		return err
	}
	if rbdVolID != volumeHandler {
		err = r.storeVolumeIDInPV(pv, rbdVolID)
		if err != nil {
			util.ErrorLogMsg("failed to store volumeID in PV %s", err)

			return err
		}
	}

	return nil
}

// Reconcile reconciles the PersistentVolume object and creates new omap
// entries for the volume.
func (r *ReconcilePersistentVolume) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	pv := &corev1.PersistentVolume{}
	err := r.client.Get(context.TODO(), request.NamespacedName, pv)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return reconcile.Result{}, nil
		}

		return reconcile.Result{}, err
	}
	// Check if the object is under deletion
	if !pv.GetDeletionTimestamp().IsZero() {
		return reconcile.Result{}, nil
	}

	err = r.reconcilePV(pv)
	if err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}
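
// A minimal sketch of driving the reconciler directly, e.g. from a unit test
// ("fakeClient" and "cfg" are assumed to be provided by the test; PVs are
// cluster-scoped, so the request carries only a name):
//
//	r := &ReconcilePersistentVolume{
//		client: fakeClient,
//		config: cfg,
//		Locks:  util.NewVolumeLocks(),
//	}
//	_, err := r.Reconcile(reconcile.Request{
//		NamespacedName: types.NamespacedName{Name: "pvc-example"},
//	})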