rebase: update controller-runtime package to v0.9.2

This commit updates controller-runtime to v0.9.2 and
updates persistentvolume.go to pass a context through the
affected functions and function calls instead of using
context.TODO().

Signed-off-by: Rakshith R <rar@redhat.com>
Author:        Rakshith R
Date:          2021-06-25 10:32:01 +05:30
Committed by:  mergify[bot]
Parent:        1b23d78113
Commit:        9eaa55506f
238 changed files with 19614 additions and 10805 deletions
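
Where the affected functions previously created a fresh context.TODO() at each call site, they now accept a caller-supplied context and pass it down. Below is a minimal sketch of that pattern; the package, function, and variable names are hypothetical illustrations, not the actual persistentvolume.go code.

// Hypothetical sketch of the context plumbing described in the commit
// message; the real change lives in persistentvolume.go.
package pvutil

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getPV previously called c.Get(context.TODO(), ...); it now threads the
// caller's context through to the API call instead.
func getPV(ctx context.Context, c client.Client, name string) (*corev1.PersistentVolume, error) {
	pv := &corev1.PersistentVolume{}
	if err := c.Get(ctx, client.ObjectKey{Name: name}, pv); err != nil {
		return nil, err
	}
	return pv, nil
}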


@@ -15,7 +15,7 @@ limitations under the License.
 */
 /*
-Package leaderelection contains a constructors for a leader election resource lock.
+Package leaderelection contains a constructor for a leader election resource lock.
 This is used to ensure that multiple copies of a controller manager can be run with
 only one active set of controllers, for active-passive HA.


@@ -31,28 +31,38 @@ import (
 const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
-// Options provides the required configuration to create a new resource lock
+// Options provides the required configuration to create a new resource lock.
 type Options struct {
 	// LeaderElection determines whether or not to use leader election when
 	// starting the manager.
 	LeaderElection bool
+	// LeaderElectionResourceLock determines which resource lock to use for leader election,
+	// defaults to "configmapsleases".
+	LeaderElectionResourceLock string
 	// LeaderElectionNamespace determines the namespace in which the leader
-	// election configmap will be created.
+	// election resource will be created.
 	LeaderElectionNamespace string
-	// LeaderElectionID determines the name of the configmap that leader election
+	// LeaderElectionID determines the name of the resource that leader election
 	// will use for holding the leader lock.
 	LeaderElectionID string
 }
-// NewResourceLock creates a new config map resource lock for use in a leader
-// election loop
+// NewResourceLock creates a new resource lock for use in a leader election loop.
 func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, options Options) (resourcelock.Interface, error) {
 	if !options.LeaderElection {
 		return nil, nil
 	}
+	// Default resource lock to "configmapsleases". We must keep this default until we are sure all controller-runtime
+	// users have upgraded from the original default ConfigMap lock to a controller-runtime version that has this new
+	// default. Many users of controller-runtime skip versions, so we should be extremely conservative here.
+	if options.LeaderElectionResourceLock == "" {
+		options.LeaderElectionResourceLock = resourcelock.ConfigMapsLeasesResourceLock
+	}
 	// LeaderElectionID must be provided to prevent clashes
 	if options.LeaderElectionID == "" {
 		return nil, errors.New("LeaderElectionID must be configured")
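
For downstream consumers, the new LeaderElectionResourceLock option means the lock type can be chosen when building the manager instead of always using ConfigMaps. A usage sketch follows, assuming controller-runtime v0.9.x manager.Options field names; the ID and namespace values are placeholders, not from this commit.

package main

import (
	"os"

	"k8s.io/client-go/tools/leaderelection/resourcelock"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Opt into the pure Lease lock instead of the "configmapsleases"
	// fallback that NewResourceLock defaults to above.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:             true,
		LeaderElectionID:           "example-lock-id",   // placeholder
		LeaderElectionNamespace:    "example-namespace", // placeholder
		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
	})
	if err != nil {
		os.Exit(1)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}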
@@ -75,13 +85,12 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
 	id = id + "_" + string(uuid.NewUUID())
 	// Construct client for leader election
-	client, err := kubernetes.NewForConfig(config)
+	client, err := kubernetes.NewForConfig(rest.AddUserAgent(config, "leader-election"))
 	if err != nil {
 		return nil, err
 	}
-	// TODO(JoelSpeed): switch to leaderelection object in 1.12
-	return resourcelock.New(resourcelock.ConfigMapsResourceLock,
+	return resourcelock.New(options.LeaderElectionResourceLock,
 		options.LeaderElectionNamespace,
 		options.LeaderElectionID,
 		client.CoreV1(),
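
The switch to rest.AddUserAgent tags leader-election requests with their own user-agent suffix so they can be distinguished in apiserver logs. A small standalone sketch of that client-go helper; the host value is a placeholder:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

func main() {
	// In a real controller the base config would come from
	// rest.InClusterConfig() or a kubeconfig loader.
	base := &rest.Config{Host: "https://example.invalid"} // placeholder host

	// AddUserAgent appends the given suffix to the default Kubernetes
	// user agent and sets it on the config before returning it.
	leCfg := rest.AddUserAgent(rest.CopyConfig(base), "leader-election")

	fmt.Println(leCfg.UserAgent)
}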
@@ -95,8 +104,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
 func getInClusterNamespace() (string, error) {
 	// Check whether the namespace file exists.
 	// If not, we are not running in cluster so can't guess the namespace.
-	_, err := os.Stat(inClusterNamespacePath)
-	if os.IsNotExist(err) {
+	if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) {
 		return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace")
 	} else if err != nil {
 		return "", fmt.Errorf("error checking namespace file: %w", err)