ceph-csi/internal/controller/controller.go
Madhu Rajanna 68bd44beba rbd: add new controller to regenerate omap data
In the case of Disaster Recovery failover, the
user is expected to create static PVCs. We have
decided not to rely on the PVC name and
namespace, for several reasons: Kubernetes plans
to support transferring a PVC to a new namespace
with a different name, and new features such as
the data populator are on the way. For now, we
plan to support async mirroring through static
PVCs.

During async mirroring, only the RBD images are
mirrored to the secondary site. When the user
creates static PVCs after a failover, we need to
regenerate the omap data. The volumeHandle in
the PV spec is an encoded string containing the
clusterID, the poolID, and the image UUID. The
clusterID and poolID will not be the same on
both clusters, so cephcsi needs to generate a
new volume handle and store a mapping between
the new and old volume handles. Whenever cephcsi
receives a CSI request, it checks whether such a
mapping exists; if it does, it picks up the new
volume handle and continues with the remaining
operations.
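To make the handle translation concrete, here is a minimal,
self-contained sketch (not ceph-csi's actual code): the
"clusterID-poolID-imageUUID" layout, the decodeHandle and
resolveHandle helpers, and the in-memory map standing in for the
omap-backed mapping are all illustrative assumptions; the real
encoding is versioned and length-prefixed.

package main

import (
	"fmt"
	"strings"
)

// VolumeIdentifier holds the fields described above as encoded in
// the PV spec's volumeHandle (illustrative layout).
type VolumeIdentifier struct {
	ClusterID string
	PoolID    string
	ImageUUID string
}

// decodeHandle splits an illustrative handle into its parts. SplitN
// keeps any remaining hyphens (e.g. inside the UUID) in the last part.
func decodeHandle(handle string) (VolumeIdentifier, error) {
	parts := strings.SplitN(handle, "-", 3)
	if len(parts) != 3 {
		return VolumeIdentifier{}, fmt.Errorf("malformed volume handle %q", handle)
	}
	return VolumeIdentifier{
		ClusterID: parts[0],
		PoolID:    parts[1],
		ImageUUID: parts[2],
	}, nil
}

// oldToNewHandle stands in for the omap-backed mapping between the
// volume handle from the primary site and the one regenerated on the
// secondary site.
var oldToNewHandle = map[string]string{}

// resolveHandle returns the regenerated handle when a mapping exists,
// otherwise the handle from the CSI request unchanged.
func resolveHandle(requested string) string {
	if mapped, ok := oldToNewHandle[requested]; ok {
		return mapped
	}
	return requested
}

func main() {
	oldToNewHandle["cid1-1-uuid-a"] = "cid2-7-uuid-a"
	id, _ := decodeHandle(resolveHandle("cid1-1-uuid-a"))
	fmt.Printf("resolved: %+v\n", id)
}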

The new controller watches for PVs being
created. It checks whether the omap exists; if
it does not, it regenerates the entire omap
data.
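Here is a sketch of what such a PV watcher looks like with
controller-runtime (the library this file uses), written against the
pre-v0.7 Reconcile signature that matches this commit's vintage. The
package name and regenerateOmapData are hypothetical stubs; the real
regeneration logic lives elsewhere in ceph-csi.

package pvwatcher

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// ReconcilePV regenerates omap data for statically created PVs.
type ReconcilePV struct {
	client client.Client
}

// add registers the controller with the manager and watches PV events.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("persistentvolume-controller", mgr,
		controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Enqueue a reconcile request for every PersistentVolume event.
	return c.Watch(&source.Kind{Type: &corev1.PersistentVolume{}},
		&handler.EnqueueRequestForObject{})
}

// Reconcile fetches the PV and regenerates the omap data if missing.
func (r *ReconcilePV) Reconcile(req reconcile.Request) (reconcile.Result, error) {
	pv := &corev1.PersistentVolume{}
	if err := r.client.Get(context.TODO(), req.NamespacedName, pv); err != nil {
		if apierrors.IsNotFound(err) {
			// PV was deleted; nothing to do.
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, err
	}
	if pv.Spec.CSI == nil {
		// Not a CSI volume; ignore it.
		return reconcile.Result{}, nil
	}
	// regenerateOmapData is hypothetical: it would check whether the
	// omap entries for this volume handle exist and rebuild them
	// (including the old-to-new handle mapping) if they do not.
	return reconcile.Result{}, regenerateOmapData(pv.Spec.CSI.VolumeHandle)
}

func regenerateOmapData(volumeHandle string) error {
	// Stub: the real logic lives in ceph-csi's rbd package.
	return nil
}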

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
2020-11-28 18:50:00 +00:00

/*
Copyright 2020 The Ceph-CSI Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"github.com/ceph/ceph-csi/internal/util"

	clientConfig "sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

// ControllerManager is the interface that wraps the Add function.
// New controllers that get added have to implement Add in order to
// be started by the manager.
type ControllerManager interface {
	Add(manager.Manager, Config) error
}

// Config holds the driver name and namespace name.
type Config struct {
	DriverName string
	Namespace  string
}

// ControllerList holds the list of managers that need to be started.
var ControllerList []ControllerManager

// addToManager calls the Add method of each registered manager.
func addToManager(mgr manager.Manager, config Config) error {
	for _, c := range ControllerList {
		err := c.Add(mgr, config)
		if err != nil {
			return err
		}
	}
	return nil
}

// Start starts all the registered managers.
func Start(config Config) error {
	electionID := config.DriverName + "-" + config.Namespace
	opts := manager.Options{
		LeaderElection: true,
		// disable metrics
		MetricsBindAddress:      "0",
		LeaderElectionNamespace: config.Namespace,
		LeaderElectionID:        electionID,
	}
	mgr, err := manager.New(clientConfig.GetConfigOrDie(), opts)
	if err != nil {
		util.ErrorLogMsg("failed to create manager: %s", err)
		return err
	}
	err = addToManager(mgr, config)
	if err != nil {
		util.ErrorLogMsg("failed to add controllers to manager: %s", err)
		return err
	}
	err = mgr.Start(signals.SetupSignalHandler())
	if err != nil {
		util.ErrorLogMsg("failed to start manager: %s", err)
	}
	return err
}
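
For context, here is a registration sketch (not part of this file):
a concrete controller implements the ControllerManager interface and
appends itself to ControllerList, typically from an init function, so
that Start picks it up. The package and type names below are
illustrative.

package mycontroller

import (
	"github.com/ceph/ceph-csi/internal/controller"

	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// pvController implements controller.ControllerManager.
type pvController struct{}

// Add wires the reconciler into the shared manager. The body is left
// out here; see the PV-watching sketch above.
func (p pvController) Add(mgr manager.Manager, cfg controller.Config) error {
	// controller.New(...) plus Watch(...) would go here.
	return nil
}

func init() {
	// Register so that controller.Start picks this controller up.
	controller.ControllerList = append(controller.ControllerList, pvController{})
}

The driver binary would then call controller.Start with its driver
name and namespace; Start blocks until the signal handler fires, and
with LeaderElection enabled only one replica runs the registered
controllers at a time.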