/*
Copyright 2020 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"context"
	"errors"
	"fmt"

	ctrl "github.com/ceph/ceph-csi/internal/controller"
	"github.com/ceph/ceph-csi/internal/rbd"
	"github.com/ceph/ceph-csi/internal/util"
	"github.com/ceph/ceph-csi/internal/util/log"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// ReconcilePersistentVolume reconciles a PersistentVolume object.
type ReconcilePersistentVolume struct {
	// client reads and updates objects through the controller-runtime cache.
	client client.Client
	// config carries the driver configuration, including the CSI driver name.
	config ctrl.Config
	// Locks serializes reconciliation per CSI volume handle.
	Locks *util.VolumeLocks
}

var (
	_ reconcile.Reconciler = &ReconcilePersistentVolume{}
	_ ctrl.Manager         = &ReconcilePersistentVolume{}
)
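
// The blank assignments above are compile-time assertions: the build fails if
// ReconcilePersistentVolume ever stops implementing reconcile.Reconciler or
// ctrl.Manager, instead of surfacing as a runtime type error.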

// Init registers ReconcilePersistentVolume in the shared ctrl.ControllerList
// so that the controller manager can start it.
func Init() {
	ctrl.ControllerList = append(ctrl.ControllerList, ReconcilePersistentVolume{})
}

// Add creates a new PV reconciler and registers it with the given manager.
func (r ReconcilePersistentVolume) Add(mgr manager.Manager, config ctrl.Config) error {
	return add(mgr, newPVReconciler(mgr, config))
}

// newPVReconciler returns a ReconcilePersistentVolume.
func newPVReconciler(mgr manager.Manager, config ctrl.Config) reconcile.Reconciler {
	r := &ReconcilePersistentVolume{
		client: mgr.GetClient(),
		config: config,
		Locks:  util.NewVolumeLocks(),
	}

	return r
}

func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New(
		"persistentvolume-controller",
		mgr,
		controller.Options{MaxConcurrentReconciles: 1, Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to PersistentVolumes
	err = c.Watch(&source.Kind{Type: &corev1.PersistentVolume{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return fmt.Errorf("failed to watch the changes: %w", err)
	}

	return nil
}
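
// A minimal sketch of how this controller is expected to be wired up by the
// controller manager. Illustrative only: the manager construction, run
// function, and driver name below are assumptions, not code from this
// package.
//
//	import (
//		"context"
//
//		ctrl "github.com/ceph/ceph-csi/internal/controller"
//		"github.com/ceph/ceph-csi/internal/controller/persistentvolume"
//		"sigs.k8s.io/controller-runtime/pkg/client/config"
//		"sigs.k8s.io/controller-runtime/pkg/manager"
//	)
//
//	func run() error {
//		mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{})
//		if err != nil {
//			return err
//		}
//		persistentvolume.Init() // appends this reconciler to ctrl.ControllerList
//		for _, c := range ctrl.ControllerList {
//			if err := c.Add(mgr, ctrl.Config{DriverName: "rbd.csi.ceph.com"}); err != nil {
//				return err
//			}
//		}
//		return mgr.Start(context.TODO())
//	}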

// getCredentials fetches the named secret from the given namespace and
// constructs ceph user credentials from its key/value pairs.
func (r *ReconcilePersistentVolume) getCredentials(
	ctx context.Context,
	name,
	namespace string) (*util.Credentials, error) {
	var cr *util.Credentials

	if name == "" || namespace == "" {
		errStr := "secret name or secret namespace is empty"
		log.ErrorLogMsg(errStr)

		return nil, errors.New(errStr)
	}

	secret := &corev1.Secret{}
	err := r.client.Get(ctx,
		types.NamespacedName{Name: name, Namespace: namespace},
		secret)
	if err != nil {
		return nil, fmt.Errorf("error getting secret %s in namespace %s: %w", name, namespace, err)
	}

	// Convert the raw secret data into the string map expected by
	// util.NewUserCredentials.
	credentials := map[string]string{}
	for key, value := range secret.Data {
		credentials[key] = string(value)
	}

	cr, err = util.NewUserCredentials(credentials)
	if err != nil {
		log.ErrorLogMsg("failed to get user credentials %s", err)

		return nil, err
	}

	return cr, nil
}
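
// For reference, the Secret read above is the cephcsi controller-expand or
// node-stage secret referenced from the PV spec. A typical shape is sketched
// below; the key names and all values are assumptions about the deployment,
// not data taken from this file:
//
//	apiVersion: v1
//	kind: Secret
//	metadata:
//	  name: csi-rbd-secret
//	  namespace: ceph-csi
//	stringData:
//	  userID: csi-rbd-user
//	  userKey: <ceph auth key for the user>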

// checkStaticVolume returns true when the PV was provisioned statically, that
// is, the admin created it with the "staticVolume" volume attribute set to
// "true".
func checkStaticVolume(pv *corev1.PersistentVolume) bool {
	return pv.Spec.CSI.VolumeAttributes["staticVolume"] == "true"
}
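
// Static volumes are backed by pre-existing RBD images for which cephcsi never
// generated journal/omap metadata, so there is nothing to regenerate for them;
// reconcilePV below skips such volumes early.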

// Background: with async disaster recovery (DR), the volumeID of a mirrored
// image is not the same on the secondary cluster when the clusterID or the
// PoolID differs. Earlier, the new volumeID mapping was expected to be stored
// in the RADOS omap pool, but a ControllerExpandVolume or DeleteVolume request
// carries only the volumeID, so the corresponding PoolID cannot be looked up
// on the new cluster.
//
// Instead, the csi-rbdplugin-controller watches PersistentVolume objects. When
// a PV is created, it checks whether the omap already exists; if it does not,
// it generates the new volumeID and, if no mapping entry exists in the PV
// annotation yet, adds one. When cephcsi later finds that the omap does not
// exist but a mapping is present in the PV annotation, it uses the new
// volumeID for further operations.

// storeVolumeIDInPV stores the new volumeID in the PV object and marks the PV
// as replicated.
func (r ReconcilePersistentVolume) storeVolumeIDInPV(
	ctx context.Context,
	pv *corev1.PersistentVolume,
	newVolumeID string) error {
	// Nothing to do if the annotation already holds the new volumeID.
	if v, ok := pv.Annotations[rbd.PVVolumeHandleAnnotationKey]; ok {
		if v == newVolumeID {
			return nil
		}
	}
	if pv.Annotations == nil {
		pv.Annotations = make(map[string]string)
	}
	if pv.Labels == nil {
		pv.Labels = make(map[string]string)
	}
	pv.Labels[rbd.PVReplicatedLabelKey] = rbd.PVReplicatedLabelValue
	pv.Annotations[rbd.PVVolumeHandleAnnotationKey] = newVolumeID

	return r.client.Update(ctx, pv)
}
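
// After a successful update the PV metadata carries the mapping; shown
// symbolically, since the concrete key strings live in the rbd package
// constants and the IDs are deployment-specific:
//
//	metadata:
//	  annotations:
//	    <rbd.PVVolumeHandleAnnotationKey>: <newVolumeID>
//	  labels:
//	    <rbd.PVReplicatedLabelKey>: <rbd.PVReplicatedLabelValue>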

// reconcilePV will extract the image details from the pv spec and regenerates
// the omap data.
func (r ReconcilePersistentVolume) reconcilePV(ctx context.Context, obj runtime.Object) error {
	pv, ok := obj.(*corev1.PersistentVolume)
	if !ok {
		return nil
	}
	// Only handle PVs provisioned by this CSI driver.
	if pv.Spec.CSI == nil || pv.Spec.CSI.Driver != r.config.DriverName {
		return nil
	}
	requestName := pv.Name
	volumeHandler := pv.Spec.CSI.VolumeHandle
	secretName := ""
	secretNamespace := ""
	// if the volume is static, don't generate OMAP data
	if checkStaticVolume(pv) {
		return nil
	}
	if pv.Spec.CSI.ControllerExpandSecretRef != nil {
		secretName = pv.Spec.CSI.ControllerExpandSecretRef.Name
		secretNamespace = pv.Spec.CSI.ControllerExpandSecretRef.Namespace
	} else if pv.Spec.CSI.NodeStageSecretRef != nil {
		secretName = pv.Spec.CSI.NodeStageSecretRef.Name
		secretNamespace = pv.Spec.CSI.NodeStageSecretRef.Namespace
	}

	// Take lock to process only one volumeHandle at a time.
	if ok := r.Locks.TryAcquire(pv.Spec.CSI.VolumeHandle); !ok {
		return fmt.Errorf(util.VolumeOperationAlreadyExistsFmt, pv.Spec.CSI.VolumeHandle)
	}
	defer r.Locks.Release(pv.Spec.CSI.VolumeHandle)

	cr, err := r.getCredentials(ctx, secretName, secretNamespace)
	if err != nil {
		log.ErrorLogMsg("failed to get credentials from secret %s", err)

		return err
	}
	defer cr.DeleteCredentials()

	// Regenerate the journal/omap data for this volume; RegenerateJournal
	// returns the (possibly new) volumeID for the image.
	rbdVolID, err := rbd.RegenerateJournal(pv.Spec.CSI.VolumeAttributes, volumeHandler, requestName, cr)
	if err != nil {
		log.ErrorLogMsg("failed to regenerate journal %s", err)

		return err
	}
	// If the volumeID changed (e.g. a different clusterID/PoolID after a DR
	// failover), persist the mapping in the PV annotation.
	if rbdVolID != volumeHandler {
		err = r.storeVolumeIDInPV(ctx, pv, rbdVolID)
		if err != nil {
			log.ErrorLogMsg("failed to store volumeID in PV %s", err)

			return err
		}
	}

	return nil
}
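
// To spot-check the result on a cluster, one can inspect the PV annotations
// directly (hypothetical PV name; the annotation key is the value of
// rbd.PVVolumeHandleAnnotationKey):
//
//	kubectl get pv pvc-0a1b2c3d -o jsonpath='{.metadata.annotations}'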

// Reconcile reconciles the PersistentVolume object and creates new omap
// entries for the volume.
func (r *ReconcilePersistentVolume) Reconcile(ctx context.Context,
	request reconcile.Request) (reconcile.Result, error) {
	pv := &corev1.PersistentVolume{}
	err := r.client.Get(ctx, request.NamespacedName, pv)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return reconcile.Result{}, nil
		}

		return reconcile.Result{}, err
	}
	// Check if the object is under deletion
	if !pv.GetDeletionTimestamp().IsZero() {
		return reconcile.Result{}, nil
	}

	err = r.reconcilePV(ctx, pv)
	if err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}