mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)

Commit: vendor update for CSI 0.3.0
1 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/BUILD (generated, vendored)

@@ -25,6 +25,7 @@ go_library(
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
17 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go (generated, vendored)

@@ -24,6 +24,7 @@ import (
"time"

"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
@@ -135,7 +136,7 @@ func NewAttachDetachController(

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach-controller"})
blkutil := volumepathhandler.NewBlockVolumePathHandler()

@@ -522,6 +523,10 @@ func (adc *attachDetachController) GetVolumeDevicePluginDir(podUID string) strin
return ""
}

func (adc *attachDetachController) GetPodsDir() string {
return ""
}

func (adc *attachDetachController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return ""
}
@@ -582,6 +587,12 @@ func (adc *attachDetachController) GetConfigMapFunc() func(namespace, name strin
}
}

func (adc *attachDetachController) GetServiceAccountTokenFunc() func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return nil, fmt.Errorf("GetServiceAccountToken unsupported in attachDetachController")
}
}

func (adc *attachDetachController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
@@ -607,3 +618,7 @@ func (adc *attachDetachController) GetNodeLabels() (map[string]string, error) {
func (adc *attachDetachController) GetNodeName() types.NodeName {
return ""
}

func (adc *attachDetachController) GetEventRecorder() record.EventRecorder {
return adc.recorder
}

@@ -68,7 +68,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {

podName := util.GetUniquePodName(pod)

generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name
generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].GCEPersistentDisk.PDName

pvcLister := fakeInformerFactory.Core().V1().PersistentVolumeClaims().Lister()
pvLister := fakeInformerFactory.Core().V1().PersistentVolumes().Lister()
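The change above (repeated in the expand and persistentvolume controllers further down) swaps the event sink built from a raw RESTClient for the typed CoreV1 events client; both produce the same sink, the new form is just simpler. A minimal, self-contained sketch of the same wiring, using a fake clientset and a made-up component name so it can run outside a real controller:

```go
package main

import (
	"github.com/golang/glog"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// A fake clientset stands in for a real kube client; the wiring is identical.
	client := fake.NewSimpleClientset()

	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	// The typed CoreV1 events client is used directly as the sink,
	// matching the updated vendored controllers above.
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.CoreV1().Events("")})

	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"})
	_ = recorder // a controller would call recorder.Event(obj, v1.EventTypeNormal, "Reason", "message")
}
```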
12 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go (generated, vendored)

@@ -257,13 +257,17 @@ func (rc *reconciler) attachDesiredVolumes() {
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if rc.actualStateOfWorld.VolumeNodeExists(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
// Volume/Node exists, touch it to reset detachRequestedTime
glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
if glog.V(5) {
glog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
}
rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue
}
// Don't even try to start an operation if there is already one running
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "") {
glog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
if glog.V(10) {
glog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}

@@ -279,7 +283,9 @@ func (rc *reconciler) attachDesiredVolumes() {
}

// Volume/Node doesn't exist, spawn a goroutine to attach it
glog.V(5).Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
if glog.V(5) {
glog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
}
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil {
glog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
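The reconciler change above wraps the V(5)/V(10) log calls in an explicit verbosity guard: `glog.V(n).Infof(...)` still evaluates its arguments (here `GenerateMsgDetailed`) even when the line is discarded, while the `if glog.V(n)` form skips that work entirely. A small standalone sketch of the pattern; the `expensiveDetail` helper is made up for the demo:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
)

// expensiveDetail stands in for calls like volumeToAttach.GenerateMsgDetailed,
// which build a detailed message string even if the log line is then discarded.
func expensiveDetail() string {
	fmt.Println("building detailed message") // visible side effect for the demo
	return "volume attached -- touching"
}

func main() {
	flag.Parse() // glog verbosity is controlled with -v=N

	// Before: the argument is always evaluated, even when -v < 5.
	glog.V(5).Infof(expensiveDetail())

	// After (the pattern used in the vendored reconciler above): the guard
	// skips the argument evaluation entirely unless -v >= 5.
	if glog.V(5) {
		glog.Infof(expensiveDetail())
	}
	glog.Flush()
}
```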
@@ -153,7 +153,6 @@ func CreateTestClient() *fake.Clientset {
},
},
},
Spec: v1.NodeSpec{ExternalID: string(nodeName)},
}
obj.Items = append(obj.Items, node)
}
1 vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD (generated, vendored)

@@ -25,6 +25,7 @@ go_library(
"//pkg/volume/util/operationexecutor:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
17 vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go (generated, vendored)

@@ -26,6 +26,7 @@ import (

"github.com/golang/glog"

authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
@@ -116,7 +117,7 @@ func NewExpandController(

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "volume_expand"})
blkutil := volumepathhandler.NewBlockVolumePathHandler()

@@ -227,6 +228,10 @@ func (expc *expandController) GetVolumeDevicePluginDir(pluginName string) string
return ""
}

func (expc *expandController) GetPodsDir() string {
return ""
}

func (expc *expandController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return ""
}
@@ -291,6 +296,12 @@ func (expc *expandController) GetConfigMapFunc() func(namespace, name string) (*
}
}

func (expc *expandController) GetServiceAccountTokenFunc() func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return nil, fmt.Errorf("GetServiceAccountToken unsupported in expandController")
}
}

func (expc *expandController) GetNodeLabels() (map[string]string, error) {
return nil, fmt.Errorf("GetNodeLabels unsupported in expandController")
}
@@ -298,3 +309,7 @@ func (expc *expandController) GetNodeLabels() (map[string]string, error) {
func (expc *expandController) GetNodeName() types.NodeName {
return ""
}

func (expc *expandController) GetEventRecorder() record.EventRecorder {
return expc.recorder
}
1 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD (generated, vendored)

@@ -34,6 +34,7 @@ go_library(
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
1 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/OWNERS (generated, vendored)

@@ -2,3 +2,4 @@ approvers:
- jsafrane
- saad-ali
- thockin
- msau42 # for volume scheduling
40 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go (generated, vendored)

@@ -723,7 +723,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
},
{
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
"14-8-1 - do not bind when pv is prebound to pvc with mismatching volumeModes",
"14-8-1 - do not bind when pvc is prebound to pv with mismatching volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
@@ -767,6 +767,44 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "volume14-12", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume output warning when pv is prebound to pvc with mismatching volumeMode
"14-13 - output warning when pv is prebound to pvc with different volumeModes",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
noerrors, testSyncVolume,
},
{
// syncVolume output warning when pv is prebound to pvc with mismatching volumeMode
"14-13-1 - output warning when pv is prebound to pvc with different volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
noerrors, testSyncVolume,
},
{
// syncVolume waits for synClaim without warning when pv is prebound to pvc with matching volumeMode block
"14-14 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume waits for synClaim without warning when pv is prebound to pvc with matching volumeMode file
"14-14-1 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes file",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
noevents, noerrors, testSyncVolume,
},
}

err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
3 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go (generated, vendored)

@@ -612,6 +612,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
ClassInformer: informerFactory.Storage().V1().StorageClasses(),
PodInformer: informerFactory.Core().V1().Pods(),
NodeInformer: informerFactory.Core().V1().Nodes(),
EventRecorder: record.NewFakeRecorder(1000),
EnableDynamicProvisioning: enableDynamicProvisioning,
}
@@ -1192,7 +1193,7 @@ func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.P
}
}

func (plugin *mockVolumePlugin) Provision() (*v1.PersistentVolume, error) {
func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if len(plugin.provisionCalls) <= plugin.provisionCallCounter {
return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter)
}
102 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go (generated, vendored)

@@ -135,6 +135,14 @@ const annDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
// a volume for this PVC.
const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"

// This annotation is added to a PVC that has been triggered by scheduler to
// be dynamically provisioned. Its value is the name of the selected node.
const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"

// If the provisioner name in a storage class is set to "kubernetes.io/no-provisioner",
// then dynamic provisioning is not supported by the storage.
const notSupportedProvisioner = "kubernetes.io/no-provisioner"

// CloudVolumeCreatedForClaimNamespaceTag is a name of a tag attached to a real volume in cloud (e.g. AWS EBS or GCE PD)
// with namespace of a persistent volume claim used to create this volume.
const CloudVolumeCreatedForClaimNamespaceTag = "kubernetes.io/created-for/pvc/namespace"
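The annSelectedNode annotation added above is the handshake between the scheduler and the PV controller for DynamicProvisioningScheduling: the scheduler writes the chosen node name onto the PVC, the controller provisions for that node, and on failure it deletes the annotation again (see rescheduleProvisioning further down) so the scheduler retries. A minimal sketch of that round trip on a bare PersistentVolumeClaim object; the claim and node names are placeholders:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Same value as the annSelectedNode constant introduced above.
const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"

func main() {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "claim-1", Namespace: "default"},
	}

	// Scheduler side: pick a node and signal the PV controller to provision for it.
	metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annSelectedNode, "node-a")

	// PV controller side: a claim carrying the annotation is provisioned immediately
	// instead of being delayed for WaitForFirstConsumer binding.
	if node, ok := pvc.Annotations[annSelectedNode]; ok {
		fmt.Printf("provisioning %s/%s for node %s\n", pvc.Namespace, pvc.Name, node)
	}

	// On provisioning failure the controller removes the annotation again,
	// which tells the scheduler to retry node selection.
	delete(pvc.Annotations, annSelectedNode)
}
```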
@@ -166,6 +174,8 @@ type PersistentVolumeController struct {
classListerSynced cache.InformerSynced
podLister corelisters.PodLister
podListerSynced cache.InformerSynced
NodeLister corelisters.NodeLister
NodeListerSynced cache.InformerSynced

kubeClient clientset.Interface
eventRecorder record.EventRecorder
@@ -277,6 +287,16 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
return false, nil
}

if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
// When feature DynamicProvisioningScheduling enabled,
// Scheduler signal to the PV controller to start dynamic
// provisioning by setting the "annSelectedNode" annotation
// in the PVC
if _, ok := claim.Annotations[annSelectedNode]; ok {
return false, nil
}
}

className := v1helper.GetPersistentVolumeClaimClass(claim)
if className == "" {
return false, nil
@@ -291,8 +311,6 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className)
}

// TODO: add check to handle dynamic provisioning later

return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil
}
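Reduced to its inputs, the shouldDelayBinding order above is: no delay unless the VolumeScheduling gate is on, no delay when the scheduler has already stamped annSelectedNode (with DynamicProvisioningScheduling enabled), otherwise delay only for WaitForFirstConsumer classes. A simplified sketch of that decision, omitting the storage-class lookup and its error paths:

```go
package main

import "fmt"

// Simplified stand-in for the storage class volumeBindingMode values.
const waitForFirstConsumer = "WaitForFirstConsumer"

// shouldDelay mirrors the ordering of checks in shouldDelayBinding above,
// with the feature gates and the annotation reduced to booleans.
func shouldDelay(volumeScheduling, provisioningScheduling, hasSelectedNode bool, bindingMode string) bool {
	if !volumeScheduling {
		return false // gate off: never delay
	}
	if provisioningScheduling && hasSelectedNode {
		return false // scheduler already picked a node: provision now
	}
	return bindingMode == waitForFirstConsumer
}

func main() {
	fmt.Println(shouldDelay(true, true, false, waitForFirstConsumer)) // true: wait for a consumer
	fmt.Println(shouldDelay(true, true, true, waitForFirstConsumer))  // false: annSelectedNode is set
}
```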
@@ -320,7 +338,6 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
// OBSERVATION: pvc is "Pending", will retry
switch {
case delayBinding:
// TODO: Skip dynamic provisioning for now
ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.WaitForFirstConsumer, "waiting for first consumer to be created before binding")
case v1helper.GetPersistentVolumeClaimClass(claim) != "":
if err = ctrl.provisionClaim(claim); err != nil {
@@ -571,6 +588,17 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
}
return nil
} else if claim.Spec.VolumeName == "" {
if isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec); err != nil || isMisMatch {
// Binding for the volume won't be called in syncUnboundClaim,
// because findBestMatchForClaim won't return the volume due to volumeMode mismatch.
volumeMsg := fmt.Sprintf("Cannot bind PersistentVolume to requested PersistentVolumeClaim %q due to incompatible volumeMode.", claim.Name)
ctrl.eventRecorder.Event(volume, v1.EventTypeWarning, events.VolumeMismatch, volumeMsg)
claimMsg := fmt.Sprintf("Cannot bind PersistentVolume %q to requested PersistentVolumeClaim due to incompatible volumeMode.", volume.Name)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, claimMsg)
// Skipping syncClaim
return nil
}

if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
// The binding is not completed; let PVC sync handle it
glog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name)
@@ -1045,12 +1073,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum

// doRerecycleVolumeOperationcycleVolume recycles a volume. This method is
// running in standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{}) {
volume, ok := arg.(*v1.PersistentVolume)
if !ok {
glog.Errorf("Cannot convert recycleVolumeOperation argument to volume, got %#v", arg)
return
}
func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.PersistentVolume) {
glog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name)

// This method may have been waiting for a volume lock for some time.
@@ -1134,13 +1157,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})

// deleteVolumeOperation deletes a volume. This method is running in standalone
// goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) error {
volume, ok := arg.(*v1.PersistentVolume)
if !ok {
glog.Errorf("Cannot convert deleteVolumeOperation argument to volume, got %#v", arg)
return nil
}

func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) error {
glog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name)

// This method may have been waiting for a volume lock for some time.
@@ -1331,13 +1348,7 @@ func (ctrl *PersistentVolumeController) provisionClaim(claim *v1.PersistentVolum

// provisionClaimOperation provisions a volume. This method is running in
// standalone goroutine and already has all necessary locks.
func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interface{}) {
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
if !ok {
glog.Errorf("Cannot convert provisionClaimOperation argument to claim, got %#v", claimObj)
return
}

func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.PersistentVolumeClaim) {
claimClass := v1helper.GetPersistentVolumeClaimClass(claim)
glog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass)
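The three hunks above change recycleVolumeOperation, deleteVolumeOperation and provisionClaimOperation from interface{} arguments with runtime type assertions to typed parameters, presumably with the callers wrapping the typed call in a closure. A generic before/after sketch of that refactor; the volume struct and function names are illustrative, not the controller's own:

```go
package main

import "fmt"

type volume struct{ name string }

// Before: the operation took interface{} and had to type-assert at runtime,
// carrying a failure path that can never be useful to a correct caller.
func deleteVolumeOld(arg interface{}) error {
	vol, ok := arg.(*volume)
	if !ok {
		return fmt.Errorf("cannot convert argument to volume, got %#v", arg)
	}
	fmt.Println("deleting", vol.name)
	return nil
}

// After: the parameter is typed, so the assertion and its error handling disappear.
func deleteVolumeNew(vol *volume) error {
	fmt.Println("deleting", vol.name)
	return nil
}

func main() {
	v := &volume{name: "pv-1"}
	_ = deleteVolumeOld(v)
	_ = deleteVolumeNew(v)
	// A caller that needs a uniform func() error can still wrap the typed
	// variant in a closure: func() error { return deleteVolumeNew(v) }.
}
```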
@@ -1425,10 +1436,30 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
return
}

var selectedNode *v1.Node = nil
var allowedTopologies []v1.TopologySelectorTerm = nil
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
selectedNode, err = ctrl.NodeLister.Get(nodeName)
if err != nil {
strerr := fmt.Sprintf("Failed to get target node: %v", err)
glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
return
}
}
allowedTopologies = storageClass.AllowedTopologies
}

opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
volume, err = provisioner.Provision()
volume, err = provisioner.Provision(selectedNode, allowedTopologies)
opComplete(&err)
if err != nil {
// Other places of failure have nothing to do with DynamicProvisioningScheduling,
// so just let controller retry in the next sync. We'll only call func
// rescheduleProvisioning here when the underlying provisioning actually failed.
ctrl.rescheduleProvisioning(claim)

strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err)
glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
@@ -1519,6 +1550,29 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
}
}

// rescheduleProvisioning signal back to the scheduler to retry dynamic provisioning
// by removing the annSelectedNode annotation
func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.PersistentVolumeClaim) {
if _, ok := claim.Annotations[annSelectedNode]; !ok {
// Provisioning not triggered by the scheduler, skip
return
}

// The claim from method args can be pointing to watcher cache. We must not
// modify these, therefore create a copy.
newClaim := claim.DeepCopy()
delete(newClaim.Annotations, annSelectedNode)
// Try to update the PVC object
if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(newClaim); err != nil {
glog.V(4).Infof("Failed to delete annotation 'annSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err)
return
}
if _, err := ctrl.storeClaimUpdate(newClaim); err != nil {
// We will get an "claim updated" event soon, this is not a big error
glog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err)
}
}

// getProvisionedVolumeNameForClaim returns PV.Name for the provisioned volume.
// The name must be unique.
func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim *v1.PersistentVolumeClaim) string {
@@ -63,6 +63,7 @@ type ControllerParameters struct {
ClaimInformer coreinformers.PersistentVolumeClaimInformer
ClassInformer storageinformers.StorageClassInformer
PodInformer coreinformers.PodInformer
NodeInformer coreinformers.NodeInformer
EventRecorder record.EventRecorder
EnableDynamicProvisioning bool
}
@@ -73,7 +74,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.CoreV1().RESTClient()).Events("")})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")})
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
}

@@ -122,6 +123,8 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
controller.classListerSynced = p.ClassInformer.Informer().HasSynced
controller.podLister = p.PodInformer.Lister()
controller.podListerSynced = p.PodInformer.Informer().HasSynced
controller.NodeLister = p.NodeInformer.Lister()
controller.NodeListerSynced = p.NodeInformer.Informer().HasSynced
return controller, nil
}

@@ -268,7 +271,7 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
glog.Infof("Starting persistent volume controller")
defer glog.Infof("Shutting down persistent volume controller")

if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced) {
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
return
}
45 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_test.go (generated, vendored)

@@ -312,8 +312,8 @@ func TestDelayBinding(t *testing.T) {
}
}

// When feature gate is disabled, should always be delayed
name := "feature-disabled"
// When volumeScheduling feature gate is disabled, should always be delayed
name := "volumeScheduling-feature-disabled"
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
@@ -322,7 +322,7 @@ func TestDelayBinding(t *testing.T) {
t.Errorf("Test %q returned true, expected false", name)
}

// Enable feature gate
// Enable volumeScheduling feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")

@@ -338,4 +338,43 @@ func TestDelayBinding(t *testing.T) {
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
}
}

// When dynamicProvisioningScheduling feature gate is disabled, should be delayed,
// even if the pvc has selectedNode annotation.
provisionedClaim := makePVCClass(&classWaitMode)
provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"}
name = "dynamicProvisioningScheduling-feature-disabled"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}

// Enable DynamicProvisioningScheduling feature gate
utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false")

// When the pvc does not have selectedNode annotation, should be delayed,
// even if dynamicProvisioningScheduling feature gate is enabled.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set"
shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}

// Should not be delayed when dynamicProvisioningScheduling feature gate is enabled,
// and the pvc has selectedNode annotation.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if shouldDelay {
t.Errorf("Test %q returned true, expected false", name)
}
}
@@ -158,8 +158,30 @@ func (c *assumeCache) add(obj interface{}) {
c.mutex.Lock()
defer c.mutex.Unlock()

if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
glog.Errorf("add: couldn't get object version: %v", err)
return
}

storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
glog.Errorf("add: couldn't get stored object version: %v", err)
return
}

// Only update object if version is newer.
// This is so we don't override assumed objects due to informer resync.
if newVersion <= storedVersion {
glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
return
}
}

objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
c.store.Update(objInfo)
glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
}

func (c *assumeCache) update(oldObj interface{}, newObj interface{}) {
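The add() change above makes the assume cache compare resource versions before overwriting, so an informer resync that delivers an older copy cannot clobber a locally assumed object. A stripped-down sketch of the same guard using plain strings and a map; ParseInt mirrors how getObjVersion treats ResourceVersion as an integer:

```go
package main

import (
	"fmt"
	"strconv"
)

// entry is a trimmed-down stand-in for the cached object: a resource version
// string plus an opaque payload.
type entry struct {
	version string
	payload string
}

// newer reports whether the incoming version is strictly greater than the stored one.
func newer(incoming, stored string) (bool, error) {
	in, err := strconv.ParseInt(incoming, 10, 64)
	if err != nil {
		return false, err
	}
	st, err := strconv.ParseInt(stored, 10, 64)
	if err != nil {
		return false, err
	}
	return in > st, nil
}

func main() {
	cache := map[string]entry{"pv-1": {version: "7", payload: "assumed copy"}}

	incoming := entry{version: "5", payload: "stale informer copy"}
	ok, err := newer(incoming.version, cache["pv-1"].version)
	if err != nil || !ok {
		fmt.Println("skip: incoming version is not newer") // mirrors the early return in add()
		return
	}
	cache["pv-1"] = incoming
}
```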
@@ -349,3 +371,34 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume
}
return pvs
}

// PVCAssumeCache is a AssumeCache for PersistentVolumeClaim objects
type PVCAssumeCache interface {
AssumeCache

// GetPVC returns the PVC from the cache with the same
// namespace and the same name of the specified pod.
// pvcKey is the result of MetaNamespaceKeyFunc on PVC obj
GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error)
}

type pvcAssumeCache struct {
*assumeCache
}

func NewPVCAssumeCache(informer cache.SharedIndexInformer) PVCAssumeCache {
return &pvcAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolumeClaim", "namespace", cache.MetaNamespaceIndexFunc)}
}

func (c *pvcAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
obj, err := c.Get(pvcKey)
if err != nil {
return nil, err
}

pvc, ok := obj.(*v1.PersistentVolumeClaim)
if !ok {
return nil, &errWrongType{"v1.PersistentVolumeClaim", obj}
}
return pvc, nil
}
@@ -36,6 +36,33 @@ func makePV(name, version, storageClass string) *v1.PersistentVolume {
}
}

func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) {
pvList := cache.ListPVs(storageClassName)
if len(pvList) != len(expectedPVs) {
t.Errorf("ListPVs() returned %v PVs, expected %v", len(pvList), len(expectedPVs))
}
for _, pv := range pvList {
expectedPV, ok := expectedPVs[pv.Name]
if !ok {
t.Errorf("ListPVs() returned unexpected PV %q", pv.Name)
}
if expectedPV != pv {
t.Errorf("ListPVs() returned PV %p, expected %p", pv, expectedPV)
}
}
}

func verifyPV(cache PVAssumeCache, name string, expectedPV *v1.PersistentVolume) error {
pv, err := cache.GetPV(name)
if err != nil {
return err
}
if pv != expectedPV {
return fmt.Errorf("GetPV() returned %p, expected %p", pv, expectedPV)
}
return nil
}

func TestAssumePV(t *testing.T) {
scenarios := map[string]struct {
oldPV *v1.PersistentVolume
@@ -88,7 +115,7 @@ func TestAssumePV(t *testing.T) {

// Add oldPV to cache
internal_cache.add(scenario.oldPV)
if err := getPV(cache, scenario.oldPV.Name, scenario.oldPV); err != nil {
if err := verifyPV(cache, scenario.oldPV.Name, scenario.oldPV); err != nil {
t.Errorf("Failed to GetPV() after initial update: %v", err)
continue
}
@@ -107,7 +134,7 @@ func TestAssumePV(t *testing.T) {
if !scenario.shouldSucceed {
expectedPV = scenario.oldPV
}
if err := getPV(cache, scenario.oldPV.Name, expectedPV); err != nil {
if err := verifyPV(cache, scenario.oldPV.Name, expectedPV); err != nil {
t.Errorf("Failed to GetPV() after initial update: %v", err)
}
}
@@ -128,13 +155,13 @@ func TestRestorePV(t *testing.T) {

// Add oldPV to cache
internal_cache.add(oldPV)
if err := getPV(cache, oldPV.Name, oldPV); err != nil {
if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
t.Fatalf("Failed to GetPV() after initial update: %v", err)
}

// Restore PV
cache.Restore(oldPV.Name)
if err := getPV(cache, oldPV.Name, oldPV); err != nil {
if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
t.Fatalf("Failed to GetPV() after iniital restore: %v", err)
}

@@ -142,13 +169,13 @@ func TestRestorePV(t *testing.T) {
if err := cache.Assume(newPV); err != nil {
t.Fatalf("Assume() returned error %v", err)
}
if err := getPV(cache, oldPV.Name, newPV); err != nil {
if err := verifyPV(cache, oldPV.Name, newPV); err != nil {
t.Fatalf("Failed to GetPV() after Assume: %v", err)
}

// Restore PV
cache.Restore(oldPV.Name)
if err := getPV(cache, oldPV.Name, oldPV); err != nil {
if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
t.Fatalf("Failed to GetPV() after restore: %v", err)
}
}
@@ -243,29 +270,203 @@ func TestPVCacheWithStorageClasses(t *testing.T) {
verifyListPVs(t, cache, pvs2, "class2")
}

func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) {
pvList := cache.ListPVs(storageClassName)
if len(pvList) != len(expectedPVs) {
t.Errorf("ListPVs() returned %v PVs, expected %v", len(pvList), len(expectedPVs))
func TestAssumeUpdatePVCache(t *testing.T) {
cache := NewPVAssumeCache(nil)
internal_cache, ok := cache.(*pvAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
for _, pv := range pvList {
expectedPV, ok := expectedPVs[pv.Name]

pvName := "test-pv0"

// Add a PV
pv := makePV(pvName, "1", "")
internal_cache.add(pv)
if err := verifyPV(cache, pvName, pv); err != nil {
t.Fatalf("failed to get PV: %v", err)
}

// Assume PV
newPV := pv.DeepCopy()
newPV.Spec.ClaimRef = &v1.ObjectReference{Name: "test-claim"}
if err := cache.Assume(newPV); err != nil {
t.Fatalf("failed to assume PV: %v", err)
}
if err := verifyPV(cache, pvName, newPV); err != nil {
t.Fatalf("failed to get PV after assume: %v", err)
}

// Add old PV
internal_cache.add(pv)
if err := verifyPV(cache, pvName, newPV); err != nil {
t.Fatalf("failed to get PV after old PV added: %v", err)
}
}

func makeClaim(name, version, namespace string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
ResourceVersion: version,
Annotations: map[string]string{},
},
}
}

func verifyPVC(cache PVCAssumeCache, pvcKey string, expectedPVC *v1.PersistentVolumeClaim) error {
pvc, err := cache.GetPVC(pvcKey)
if err != nil {
return err
}
if pvc != expectedPVC {
return fmt.Errorf("GetPVC() returned %p, expected %p", pvc, expectedPVC)
}
return nil
}

func TestAssumePVC(t *testing.T) {
scenarios := map[string]struct {
oldPVC *v1.PersistentVolumeClaim
newPVC *v1.PersistentVolumeClaim
shouldSucceed bool
}{
"success-same-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "5", "ns1"),
shouldSucceed: true,
},
"success-new-higher-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "6", "ns1"),
shouldSucceed: true,
},
"fail-old-not-found": {
oldPVC: makeClaim("pvc2", "5", "ns1"),
newPVC: makeClaim("pvc1", "5", "ns1"),
shouldSucceed: false,
},
"fail-new-lower-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "4", "ns1"),
shouldSucceed: false,
},
"fail-new-bad-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "a", "ns1"),
shouldSucceed: false,
},
"fail-old-bad-version": {
oldPVC: makeClaim("pvc1", "a", "ns1"),
newPVC: makeClaim("pvc1", "5", "ns1"),
shouldSucceed: false,
},
}

for name, scenario := range scenarios {
cache := NewPVCAssumeCache(nil)
internal_cache, ok := cache.(*pvcAssumeCache)
if !ok {
t.Errorf("ListPVs() returned unexpected PV %q", pv.Name)
t.Fatalf("Failed to get internal cache")
}
if expectedPV != pv {
t.Errorf("ListPVs() returned PV %p, expected %p", pv, expectedPV)

// Add oldPVC to cache
internal_cache.add(scenario.oldPVC)
if err := verifyPVC(cache, getPVCName(scenario.oldPVC), scenario.oldPVC); err != nil {
t.Errorf("Failed to GetPVC() after initial update: %v", err)
continue
}

// Assume newPVC
err := cache.Assume(scenario.newPVC)
if scenario.shouldSucceed && err != nil {
t.Errorf("Test %q failed: Assume() returned error %v", name, err)
}
if !scenario.shouldSucceed && err == nil {
t.Errorf("Test %q failed: Assume() returned success but expected error", name)
}

// Check that GetPVC returns correct PVC
expectedPV := scenario.newPVC
if !scenario.shouldSucceed {
expectedPV = scenario.oldPVC
}
if err := verifyPVC(cache, getPVCName(scenario.oldPVC), expectedPV); err != nil {
t.Errorf("Failed to GetPVC() after initial update: %v", err)
}
}
}

func getPV(cache PVAssumeCache, name string, expectedPV *v1.PersistentVolume) error {
pv, err := cache.GetPV(name)
if err != nil {
return err
func TestRestorePVC(t *testing.T) {
cache := NewPVCAssumeCache(nil)
internal_cache, ok := cache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
if pv != expectedPV {
return fmt.Errorf("GetPV() returned %p, expected %p", pv, expectedPV)

oldPVC := makeClaim("pvc1", "5", "ns1")
newPVC := makeClaim("pvc1", "5", "ns1")

// Restore PVC that doesn't exist
cache.Restore("nothing")

// Add oldPVC to cache
internal_cache.add(oldPVC)
if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
t.Fatalf("Failed to GetPVC() after initial update: %v", err)
}

// Restore PVC
cache.Restore(getPVCName(oldPVC))
if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
t.Fatalf("Failed to GetPVC() after iniital restore: %v", err)
}

// Assume newPVC
if err := cache.Assume(newPVC); err != nil {
t.Fatalf("Assume() returned error %v", err)
}
if err := verifyPVC(cache, getPVCName(oldPVC), newPVC); err != nil {
t.Fatalf("Failed to GetPVC() after Assume: %v", err)
}

// Restore PVC
cache.Restore(getPVCName(oldPVC))
if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
t.Fatalf("Failed to GetPVC() after restore: %v", err)
}
}

func TestAssumeUpdatePVCCache(t *testing.T) {
cache := NewPVCAssumeCache(nil)
internal_cache, ok := cache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}

pvcName := "test-pvc0"
pvcNamespace := "test-ns"

// Add a PVC
pvc := makeClaim(pvcName, "1", pvcNamespace)
internal_cache.add(pvc)
if err := verifyPVC(cache, getPVCName(pvc), pvc); err != nil {
t.Fatalf("failed to get PVC: %v", err)
}

// Assume PVC
newPVC := pvc.DeepCopy()
newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
if err := cache.Assume(newPVC); err != nil {
t.Fatalf("failed to assume PVC: %v", err)
}
if err := verifyPVC(cache, getPVCName(pvc), newPVC); err != nil {
t.Fatalf("failed to get PVC after assume: %v", err)
}

// Add old PVC
internal_cache.add(pvc)
if err := verifyPVC(cache, getPVCName(pvc), newPVC); err != nil {
t.Fatalf("failed to get PVC after old PVC added: %v", err)
}
return nil
}
226 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go (generated, vendored)

@@ -24,10 +24,13 @@ import (

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

@@ -58,24 +61,30 @@ type SchedulerVolumeBinder interface {
// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
// Otherwise, it tries to find an available PV to bind to the PVC.
//
// It returns true if there are matching PVs that can satisfy all of the Pod's PVCs, and returns true
// if bound volumes satisfy the PV NodeAffinity.
// It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned,
// and returns true if bound volumes satisfy the PV NodeAffinity.
//
// This function is called by the volume binding scheduler predicate and can be called in parallel
FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error)
FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error)

// AssumePodVolumes will take the PV matches for unbound PVCs and update the PV cache assuming
// AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
// that the PV is prebound to the PVC.
// 2. Take the PVCs that need provisioning and update the PVC cache with related
// annotations set.
//
// It returns true if all volumes are fully bound, and returns true if any volume binding API operation needs
// to be done afterwards.
// It returns true if all volumes are fully bound, and returns true if any volume binding/provisioning
// API operation needs to be done afterwards.
//
// This function will modify assumedPod with the node name.
// This function is called serially.
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error)

// BindPodVolumes will initiate the volume binding by making the API call to prebind the PV
// BindPodVolumes will:
// 1. Initiate the volume binding by making the API call to prebind the PV
// to its matching PVC.
// 2. Trigger the volume provisioning by making the API call to set related
// annotations on the PVC
//
// This function can be called in parallel.
BindPodVolumes(assumedPod *v1.Pod) error
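The reworked comments above describe a three-phase contract: FindPodVolumes runs from the scheduler predicate (possibly in parallel), AssumePodVolumes runs serially and only touches in-memory caches, and BindPodVolumes performs the API writes (PV prebind and, with this change, PVC provisioning annotations). A reduced sketch of that call order with placeholder types, not the real interface:

```go
package main

import "fmt"

// podVolumeBinder is a reduced stand-in for SchedulerVolumeBinder
// (the real methods take *v1.Pod and *v1.Node).
type podVolumeBinder interface {
	// Called from the scheduler predicate, possibly in parallel per node.
	FindPodVolumes(pod, node string) (unboundOK, boundOK bool, err error)
	// Called serially once a node is chosen; updates in-memory caches only.
	AssumePodVolumes(pod, node string) (allBound, apiUpdateNeeded bool, err error)
	// Called in parallel; performs the PV prebind / PVC annotation API writes.
	BindPodVolumes(pod string) error
}

// noopBinder pretends every volume fits and one API update is still needed.
type noopBinder struct{}

func (noopBinder) FindPodVolumes(pod, node string) (bool, bool, error)   { return true, true, nil }
func (noopBinder) AssumePodVolumes(pod, node string) (bool, bool, error) { return false, true, nil }
func (noopBinder) BindPodVolumes(pod string) error                       { fmt.Println("binding", pod); return nil }

func scheduleOnce(b podVolumeBinder, pod, node string) error {
	// Predicate phase: can run for many candidate nodes concurrently.
	if _, _, err := b.FindPodVolumes(pod, node); err != nil {
		return err
	}
	// Assume phase: serial, cache-only updates for the chosen node.
	allBound, needAPI, err := b.AssumePodVolumes(pod, node)
	if err != nil {
		return err
	}
	if allBound || !needAPI {
		return nil // nothing left to write
	}
	// Bind phase: the actual API calls.
	return b.BindPodVolumes(pod)
}

func main() {
	_ = scheduleOnce(noopBinder{}, "pod-1", "node-a")
}
```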
@ -87,10 +96,8 @@ type SchedulerVolumeBinder interface {
|
||||
type volumeBinder struct {
|
||||
ctrl *PersistentVolumeController
|
||||
|
||||
// TODO: Need AssumeCache for PVC for dynamic provisioning
|
||||
pvcCache corelisters.PersistentVolumeClaimLister
|
||||
nodeCache corelisters.NodeLister
|
||||
pvCache PVAssumeCache
|
||||
pvcCache PVCAssumeCache
|
||||
pvCache PVAssumeCache
|
||||
|
||||
// Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes.
|
||||
// AssumePodVolumes modifies the bindings again for use in BindPodVolumes.
|
||||
@ -102,7 +109,6 @@ func NewVolumeBinder(
|
||||
kubeClient clientset.Interface,
|
||||
pvcInformer coreinformers.PersistentVolumeClaimInformer,
|
||||
pvInformer coreinformers.PersistentVolumeInformer,
|
||||
nodeInformer coreinformers.NodeInformer,
|
||||
storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder {
|
||||
|
||||
// TODO: find better way...
|
||||
@ -113,8 +119,7 @@ func NewVolumeBinder(
|
||||
|
||||
b := &volumeBinder{
|
||||
ctrl: ctrl,
|
||||
pvcCache: pvcInformer.Lister(),
|
||||
nodeCache: nodeInformer.Lister(),
|
||||
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
|
||||
pvCache: NewPVAssumeCache(pvInformer.Informer()),
|
||||
podBindingCache: NewPodBindingCache(),
|
||||
}
|
||||
@ -126,24 +131,20 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
|
||||
return b.podBindingCache
|
||||
}
|
||||
|
||||
// FindPodVolumes caches the matching PVs per node in podBindingCache
|
||||
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
|
||||
// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache
|
||||
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
|
||||
podName := getPodName(pod)
|
||||
|
||||
glog.V(4).Infof("FindPodVolumes for pod %q, node %q", podName, nodeName)
|
||||
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
|
||||
glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
|
||||
|
||||
// Initialize to true for pods that don't have volumes
|
||||
unboundVolumesSatisfied = true
|
||||
boundVolumesSatisfied = true
|
||||
|
||||
node, err := b.nodeCache.Get(nodeName)
|
||||
if node == nil || err != nil {
|
||||
return false, false, fmt.Errorf("error getting node %q: %v", nodeName, err)
|
||||
}
|
||||
|
||||
// The pod's volumes need to be processed in one call to avoid the race condition where
|
||||
// volumes can get bound in between calls.
|
||||
boundClaims, unboundClaims, unboundClaimsImmediate, err := b.getPodVolumes(pod)
|
||||
// volumes can get bound/provisioned in between calls.
|
||||
boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
@ -161,37 +162,51 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolu
|
||||
}
|
||||
}
|
||||
|
||||
// Find PVs for unbound volumes
|
||||
if len(unboundClaims) > 0 {
|
||||
unboundVolumesSatisfied, err = b.findMatchingVolumes(pod, unboundClaims, node)
|
||||
if len(claimsToBind) > 0 {
|
||||
var claimsToProvision []*v1.PersistentVolumeClaim
|
||||
unboundVolumesSatisfied, claimsToProvision, err = b.findMatchingVolumes(pod, claimsToBind, node)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
|
||||
// Try to provision for unbound volumes
|
||||
if !unboundVolumesSatisfied {
|
||||
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return unboundVolumesSatisfied, boundVolumesSatisfied, nil
|
||||
}
|
||||
|
||||
// AssumePodVolumes will take the cached matching PVs in podBindingCache for the chosen node
|
||||
// and update the pvCache with the new prebound PV. It will update podBindingCache again
|
||||
// with the PVs that need an API update.
|
||||
// AssumePodVolumes will take the cached matching PVs and PVCs to provision
|
||||
// in podBindingCache for the chosen node, and:
|
||||
// 1. Update the pvCache with the new prebound PV.
|
||||
// 2. Update the pvcCache with the new PVCs with annotations set
|
||||
// It will update podBindingCache again with the PVs and PVCs that need an API update.
|
||||
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) {
|
||||
podName := getPodName(assumedPod)
|
||||
|
||||
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
|
||||
|
||||
if allBound := b.arePodVolumesBound(assumedPod); allBound {
|
||||
glog.V(4).Infof("AssumePodVolumes: all PVCs bound and nothing to do")
|
||||
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
assumedPod.Spec.NodeName = nodeName
|
||||
// Assume PV
|
||||
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
|
||||
newBindings := []*bindingInfo{}
|
||||
|
||||
for _, binding := range claimsToBind {
|
||||
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
|
||||
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
||||
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
|
||||
podName,
|
||||
binding.pv.Name,
|
||||
binding.pvc.Name,
|
||||
newPV,
|
||||
@ -212,30 +227,71 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
|
||||
}
|
||||
}
|
||||
|
||||
if len(newBindings) == 0 {
|
||||
// Don't update cached bindings if no API updates are needed. This can happen if we
|
||||
// previously updated the PV object and are waiting for the PV controller to finish binding.
|
||||
glog.V(4).Infof("AssumePodVolumes: PVs already assumed")
|
||||
return false, false, nil
|
||||
// Don't update cached bindings if no API updates are needed. This can happen if we
|
||||
// previously updated the PV object and are waiting for the PV controller to finish binding.
|
||||
if len(newBindings) != 0 {
|
||||
bindingRequired = true
|
||||
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
|
||||
}
|
||||
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
|
||||
|
||||
return false, true, nil
|
||||
// Assume PVCs
|
||||
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
|
||||
|
||||
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
|
||||
for _, claim := range claimsToProvision {
|
||||
// The claims from method args can be pointing to watcher cache. We must not
|
||||
// modify these, therefore create a copy.
|
||||
claimClone := claim.DeepCopy()
|
||||
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, nodeName)
|
||||
err = b.pvcCache.Assume(claimClone)
|
||||
if err != nil {
|
||||
b.revertAssumedPVs(newBindings)
|
||||
b.revertAssumedPVCs(newProvisionedPVCs)
|
||||
return
|
||||
}
|
||||
|
||||
newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
|
||||
}
|
||||
|
||||
if len(newProvisionedPVCs) != 0 {
|
||||
bindingRequired = true
|
||||
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// BindPodVolumes gets the cached bindings in podBindingCache and makes the API update for those PVs.
|
||||
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache
|
||||
// and makes the API update for those PVs/PVCs.
|
||||
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
|
||||
glog.V(4).Infof("BindPodVolumes for pod %q", getPodName(assumedPod))
|
||||
podName := getPodName(assumedPod)
|
||||
glog.V(4).Infof("BindPodVolumes for pod %q", podName)
|
||||
|
||||
bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
|
||||
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
|
||||
|
||||
// Do the actual prebinding. Let the PV controller take care of the rest
|
||||
// There is no API rollback if the actual binding fails
|
||||
for i, bindingInfo := range bindings {
|
||||
glog.V(5).Infof("BindPodVolumes: Pod %q, binding PV %q to PVC %q", podName, bindingInfo.pv.Name, bindingInfo.pvc.Name)
|
||||
_, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false)
|
||||
if err != nil {
|
||||
// only revert assumed cached updates for volumes we haven't successfully bound
|
||||
b.revertAssumedPVs(bindings[i:])
|
||||
// Revert all of the assumed cached updates for claims,
|
||||
// since no actual API update will be done
|
||||
b.revertAssumedPVCs(claimsToProvision)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
|
||||
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
|
||||
for i, claim := range claimsToProvision {
|
||||
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
|
||||
glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err)
|
||||
// only revert assumed cached updates for claims we haven't successfully updated
|
||||
b.revertAssumedPVCs(claimsToProvision[i:])
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -257,7 +313,13 @@ func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFull
|
||||
}
|
||||
|
||||
pvcName := vol.PersistentVolumeClaim.ClaimName
|
||||
pvc, err := b.pvcCache.PersistentVolumeClaims(namespace).Get(pvcName)
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pvcName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
pvc, err := b.pvcCache.GetPVC(getPVCName(claim))
|
||||
if err != nil || pvc == nil {
|
||||
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err)
|
||||
}
|
||||
@ -342,16 +404,22 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
|
||||
glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("All volumes for Pod %q match with Node %q", podName, node.Name)
|
||||
glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingInfo, node *v1.Node) (foundMatches bool, err error) {
|
||||
// findMatchingVolumes tries to find matching volumes for given claims,
|
||||
// and return unbound claims for further provision.
|
||||
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingInfo, node *v1.Node) (foundMatches bool, unboundClaims []*v1.PersistentVolumeClaim, err error) {
|
||||
podName := getPodName(pod)
|
||||
// Sort all the claims by increasing size request to get the smallest fits
|
||||
sort.Sort(byPVCSize(claimsToBind))
|
||||
|
||||
chosenPVs := map[string]*v1.PersistentVolume{}
|
||||
|
||||
foundMatches = true
|
||||
matchedClaims := []*bindingInfo{}
|
||||
|
||||
for _, bindingInfo := range claimsToBind {
|
||||
// Get storage class name from each PVC
|
||||
storageClassName := ""
|
||||
@ -364,20 +432,72 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
|
||||
// Find a matching PV
|
||||
bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, nil, err
|
||||
}
|
||||
if bindingInfo.pv == nil {
|
||||
glog.V(4).Infof("No matching volumes for PVC %q on node %q", getPVCName(bindingInfo.pvc), node.Name)
|
||||
return false, nil
|
||||
glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, getPVCName(bindingInfo.pvc), node.Name)
|
||||
unboundClaims = append(unboundClaims, bindingInfo.pvc)
|
||||
foundMatches = false
|
||||
continue
|
||||
}
|
||||
|
||||
// matching PV needs to be excluded so we don't select it again
|
||||
chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv
|
||||
matchedClaims = append(matchedClaims, bindingInfo)
|
||||
glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, getPVCName(bindingInfo.pvc), node.Name, podName)
|
||||
}
|
||||
|
||||
// Mark cache with all the matches for each PVC for this node
|
||||
b.podBindingCache.UpdateBindings(pod, node.Name, claimsToBind)
|
||||
glog.V(4).Infof("Found matching volumes on node %q", node.Name)
|
||||
if len(matchedClaims) > 0 {
|
||||
b.podBindingCache.UpdateBindings(pod, node.Name, matchedClaims)
|
||||
}
|
||||
|
||||
if foundMatches {
|
||||
glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// checkVolumeProvisions checks given unbound claims (the claims have gone through func
|
||||
// findMatchingVolumes, and do not have matching volumes for binding), and return true
|
||||
// if all of the claims are eligible for dynamic provision.
|
||||
func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied bool, err error) {
|
||||
podName := getPodName(pod)
|
||||
provisionedClaims := []*v1.PersistentVolumeClaim{}
|
||||
|
||||
for _, claim := range claimsToProvision {
|
||||
className := v1helper.GetPersistentVolumeClaimClass(claim)
|
||||
if className == "" {
|
||||
return false, fmt.Errorf("no class for claim %q", getPVCName(claim))
|
||||
}
|
||||
|
||||
class, err := b.ctrl.classLister.Get(className)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to find storage class %q", className)
|
||||
}
|
||||
provisioner := class.Provisioner
|
||||
if provisioner == "" || provisioner == notSupportedProvisioner {
|
||||
glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, getPVCName(claim))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if the node can satisfy the topology requirement in the class
|
||||
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
|
||||
glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// TODO: Check if capacity of the node domain in the storage class
|
||||
// can satisfy resource requirement of given claim
|
||||
|
||||
provisionedClaims = append(provisionedClaims, claim)
|
||||
|
||||
}
|
||||
glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
|
||||
|
||||
// Mark cache with all the PVCs that need provisioning for this node
|
||||
b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims)
|
||||
|
||||
return true, nil
|
||||
}
|
||||
@ -388,6 +508,12 @@ func (b *volumeBinder) revertAssumedPVs(bindings []*bindingInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
|
||||
for _, claim := range claims {
|
||||
b.pvcCache.Restore(getPVCName(claim))
|
||||
}
|
||||
}
|
||||
|
||||
type bindingInfo struct {
|
||||
// Claim that needs to be bound
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
|
@ -30,27 +30,41 @@ type PodBindingCache interface {
|
||||
// pod and node.
|
||||
UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo)
|
||||
|
||||
// DeleteBindings will remove all cached bindings for the given pod.
|
||||
DeleteBindings(pod *v1.Pod)
|
||||
|
||||
// GetBindings will return the cached bindings for the given pod and node.
|
||||
GetBindings(pod *v1.Pod, node string) []*bindingInfo
|
||||
|
||||
// UpdateProvisionedPVCs will update the cache with the given provisioning decisions
|
||||
// for the pod and node.
|
||||
UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim)
|
||||
|
||||
// GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node.
|
||||
GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim
|
||||
|
||||
// DeleteBindings will remove all cached bindings and provisionings for the given pod.
|
||||
// TODO: separate the func if it is needed to delete bindings/provisionings individually
|
||||
DeleteBindings(pod *v1.Pod)
|
||||
}
|
||||
|
||||
type podBindingCache struct {
|
||||
mutex sync.Mutex
|
||||
|
||||
// Key = pod name
|
||||
// Value = nodeBindings
|
||||
bindings map[string]nodeBindings
|
||||
// Value = nodeDecisions
|
||||
bindingDecisions map[string]nodeDecisions
|
||||
}
|
||||
|
||||
// Key = nodeName
|
||||
// Value = array of bindingInfo
|
||||
type nodeBindings map[string][]*bindingInfo
|
||||
// Value = bindings & provisioned PVCs of the node
|
||||
type nodeDecisions map[string]nodeDecision
|
||||
|
||||
// A decision includes bindingInfo and provisioned PVCs of the node
|
||||
type nodeDecision struct {
|
||||
bindings []*bindingInfo
|
||||
provisionings []*v1.PersistentVolumeClaim
|
||||
}
|
||||
|
||||
func NewPodBindingCache() PodBindingCache {
|
||||
return &podBindingCache{bindings: map[string]nodeBindings{}}
|
||||
return &podBindingCache{bindingDecisions: map[string]nodeDecisions{}}
|
||||
}
|
||||
|
||||
func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
|
||||
@ -58,7 +72,7 @@ func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
delete(c.bindings, podName)
|
||||
delete(c.bindingDecisions, podName)
|
||||
}
|
||||
|
||||
func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) {
|
||||
@ -66,12 +80,20 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
nodeBinding, ok := c.bindings[podName]
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
if !ok {
|
||||
nodeBinding = nodeBindings{}
|
||||
c.bindings[podName] = nodeBinding
|
||||
decisions = nodeDecisions{}
|
||||
c.bindingDecisions[podName] = decisions
|
||||
}
|
||||
nodeBinding[node] = bindings
|
||||
decision, ok := decisions[node]
|
||||
if !ok {
|
||||
decision = nodeDecision{
|
||||
bindings: bindings,
|
||||
}
|
||||
} else {
|
||||
decision.bindings = bindings
|
||||
}
|
||||
decisions[node] = decision
|
||||
}
|
||||
|
||||
func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
|
||||
@ -79,9 +101,50 @@ func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
nodeBindings, ok := c.bindings[podName]
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return nodeBindings[node]
|
||||
decision, ok := decisions[node]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return decision.bindings
|
||||
}
|
||||
|
||||
func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
if !ok {
|
||||
decisions = nodeDecisions{}
|
||||
c.bindingDecisions[podName] = decisions
|
||||
}
|
||||
decision, ok := decisions[node]
|
||||
if !ok {
|
||||
decision = nodeDecision{
|
||||
provisionings: pvcs,
|
||||
}
|
||||
} else {
|
||||
decision.provisionings = pvcs
|
||||
}
|
||||
decisions[node] = decision
|
||||
}
|
||||
|
||||
func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
podName := getPodName(pod)
|
||||
decisions, ok := c.bindingDecisions[podName]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
decision, ok := decisions[node]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return decision.provisionings
|
||||
}
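The cache above is keyed by pod name and node, and a caller is expected to write its scheduling decision and read it back later. A hypothetical sketch of that call pattern (not part of the vendored code), using only the PodBindingCache methods defined in this file:

package persistentvolume

import "k8s.io/api/core/v1"

// recordAndReadDecision is a hypothetical helper showing the intended
// PodBindingCache call pattern: store the binding and provisioning decisions
// for a pod on a node, read them back, then drop them once no longer needed.
func recordAndReadDecision(pod *v1.Pod, node string, bindings []*bindingInfo, provisions []*v1.PersistentVolumeClaim) ([]*bindingInfo, []*v1.PersistentVolumeClaim) {
	c := NewPodBindingCache()

	// Record the decision made for this pod on the candidate node.
	c.UpdateBindings(pod, node, bindings)
	c.UpdateProvisionedPVCs(pod, node, provisions)

	// Read the decision back, e.g. when it is time to bind.
	gotBindings := c.GetBindings(pod, node)
	gotProvisions := c.GetProvisionedPVCs(pod, node)

	// Once the pod is bound or deleted, remove every cached decision for it.
	c.DeleteBindings(pod)

	return gotBindings, gotProvisions
}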
|
||||
|
@ -26,32 +26,37 @@ import (
|
||||
|
||||
func TestUpdateGetBindings(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
updateBindings []*bindingInfo
|
||||
updatePod string
|
||||
updateNode string
|
||||
updateBindings []*bindingInfo
|
||||
updateProvisionings []*v1.PersistentVolumeClaim
|
||||
updatePod string
|
||||
updateNode string
|
||||
|
||||
getBindings []*bindingInfo
|
||||
getPod string
|
||||
getNode string
|
||||
getBindings []*bindingInfo
|
||||
getProvisionings []*v1.PersistentVolumeClaim
|
||||
getPod string
|
||||
getNode string
|
||||
}{
|
||||
"no-pod": {
|
||||
getPod: "pod1",
|
||||
getNode: "node1",
|
||||
},
|
||||
"no-node": {
|
||||
updatePod: "pod1",
|
||||
updateNode: "node1",
|
||||
updateBindings: []*bindingInfo{},
|
||||
getPod: "pod1",
|
||||
getNode: "node2",
|
||||
updatePod: "pod1",
|
||||
updateNode: "node1",
|
||||
updateBindings: []*bindingInfo{},
|
||||
updateProvisionings: []*v1.PersistentVolumeClaim{},
|
||||
getPod: "pod1",
|
||||
getNode: "node2",
|
||||
},
|
||||
"binding-exists": {
|
||||
updatePod: "pod1",
|
||||
updateNode: "node1",
|
||||
updateBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}},
|
||||
getPod: "pod1",
|
||||
getNode: "node1",
|
||||
getBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}},
|
||||
updatePod: "pod1",
|
||||
updateNode: "node1",
|
||||
updateBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}},
|
||||
updateProvisionings: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}},
|
||||
getPod: "pod1",
|
||||
getNode: "node1",
|
||||
getBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}},
|
||||
getProvisionings: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}},
|
||||
},
|
||||
}
|
||||
|
||||
@ -61,6 +66,7 @@ func TestUpdateGetBindings(t *testing.T) {
|
||||
// Perform updates
|
||||
updatePod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: scenario.updatePod, Namespace: "ns"}}
|
||||
cache.UpdateBindings(updatePod, scenario.updateNode, scenario.updateBindings)
|
||||
cache.UpdateProvisionedPVCs(updatePod, scenario.updateNode, scenario.updateProvisionings)
|
||||
|
||||
// Verify updated bindings
|
||||
bindings := cache.GetBindings(updatePod, scenario.updateNode)
|
||||
@ -68,45 +74,71 @@ func TestUpdateGetBindings(t *testing.T) {
|
||||
t.Errorf("Test %v failed: returned bindings after update different. Got %+v, expected %+v", name, bindings, scenario.updateBindings)
|
||||
}
|
||||
|
||||
// Verify updated provisionings
|
||||
provisionings := cache.GetProvisionedPVCs(updatePod, scenario.updateNode)
|
||||
if !reflect.DeepEqual(provisionings, scenario.updateProvisionings) {
|
||||
t.Errorf("Test %v failed: returned provisionings after update different. Got %+v, expected %+v", name, provisionings, scenario.updateProvisionings)
|
||||
}
|
||||
|
||||
// Get bindings
|
||||
getPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: scenario.getPod, Namespace: "ns"}}
|
||||
bindings = cache.GetBindings(getPod, scenario.getNode)
|
||||
if !reflect.DeepEqual(bindings, scenario.getBindings) {
|
||||
t.Errorf("Test %v failed: unexpected bindings returned. Got %+v, expected %+v", name, bindings, scenario.updateBindings)
|
||||
}
|
||||
|
||||
// Get provisionings
|
||||
provisionings = cache.GetProvisionedPVCs(getPod, scenario.getNode)
|
||||
if !reflect.DeepEqual(provisionings, scenario.getProvisionings) {
|
||||
t.Errorf("Test %v failed: unexpected bindings returned. Got %+v, expected %+v", name, provisionings, scenario.getProvisionings)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteBindings(t *testing.T) {
|
||||
initialBindings := []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}}
|
||||
initialProvisionings := []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}}
|
||||
cache := NewPodBindingCache()
|
||||
|
||||
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns"}}
|
||||
|
||||
// Get nil bindings
|
||||
// Get nil bindings and provisionings
|
||||
bindings := cache.GetBindings(pod, "node1")
|
||||
if bindings != nil {
|
||||
t.Errorf("Test failed: expected initial nil bindings, got %+v", bindings)
|
||||
}
|
||||
provisionings := cache.GetProvisionedPVCs(pod, "node1")
|
||||
if provisionings != nil {
|
||||
t.Errorf("Test failed: expected initial nil provisionings, got %+v", provisionings)
|
||||
}
|
||||
|
||||
// Delete nothing
|
||||
cache.DeleteBindings(pod)
|
||||
|
||||
// Perform updates
|
||||
cache.UpdateBindings(pod, "node1", initialBindings)
|
||||
cache.UpdateProvisionedPVCs(pod, "node1", initialProvisionings)
|
||||
|
||||
// Get bindings
|
||||
// Get bindings and provisionings
|
||||
bindings = cache.GetBindings(pod, "node1")
|
||||
if !reflect.DeepEqual(bindings, initialBindings) {
|
||||
t.Errorf("Test failed: expected bindings %+v, got %+v", initialBindings, bindings)
|
||||
}
|
||||
provisionings = cache.GetProvisionedPVCs(pod, "node1")
|
||||
if !reflect.DeepEqual(provisionings, initialProvisionings) {
|
||||
t.Errorf("Test failed: expected provisionings %+v, got %+v", initialProvisionings, provisionings)
|
||||
}
|
||||
|
||||
// Delete
|
||||
cache.DeleteBindings(pod)
|
||||
|
||||
// Get bindings
|
||||
// Get bindings and provisionings
|
||||
bindings = cache.GetBindings(pod, "node1")
|
||||
if bindings != nil {
|
||||
t.Errorf("Test failed: expected nil bindings, got %+v", bindings)
|
||||
}
|
||||
provisionings = cache.GetProvisionedPVCs(pod, "node1")
|
||||
if provisionings != nil {
|
||||
t.Errorf("Test failed: expected nil provisionings, got %+v", provisionings)
|
||||
}
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ type FakeVolumeBinder struct {
|
||||
BindCalled bool
|
||||
}
|
||||
|
||||
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, nodeName string) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) {
|
||||
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) {
|
||||
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
|
||||
}
|
||||
|
||||
|
498 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder_test.go generated vendored
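The tests below exercise the three-step call sequence a scheduler integration drives against the binder. As a reading aid (not part of the vendored diff), a minimal hypothetical sketch of that sequence, using only the SchedulerVolumeBinder methods that appear in this change; the scheduleAndBind helper itself is an assumption:

package persistentvolume

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// scheduleAndBind is a hypothetical helper illustrating the order in which a
// scheduler-side caller would use a SchedulerVolumeBinder: feasibility check
// during predicates, cache-only assume after node selection, then API updates.
func scheduleAndBind(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) error {
	// 1. Predicate phase: can this node satisfy the pod's bound and unbound PVCs?
	unboundSatisfied, boundSatisfied, err := binder.FindPodVolumes(pod, node)
	if err != nil {
		return err
	}
	if !unboundSatisfied || !boundSatisfied {
		return fmt.Errorf("node %q cannot satisfy the pod's volume requirements", node.Name)
	}

	// 2. Assume phase: record the chosen bindings/provisioning in the in-memory caches.
	allBound, bindingRequired, err := binder.AssumePodVolumes(pod, node.Name)
	if err != nil {
		return err
	}
	if allBound || !bindingRequired {
		// Nothing left to do via the API.
		return nil
	}

	// 3. Bind phase: issue the PV/PVC API updates for the assumed decisions.
	return binder.BindPodVolumes(pod)
}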
@ -33,24 +33,29 @@ import (
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
var (
|
||||
unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", &waitClass)
|
||||
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", &waitClass)
|
||||
preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", &waitClass)
|
||||
boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", &waitClass)
|
||||
boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", &waitClass)
|
||||
badPVC = makeBadPVC()
|
||||
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", &immediateClass)
|
||||
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", &immediateClass)
|
||||
unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", "1", &waitClass)
|
||||
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", "1", &waitClass)
|
||||
preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", "1", &waitClass)
|
||||
boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", "1", &waitClass)
|
||||
boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", "1", &waitClass)
|
||||
badPVC = makeBadPVC()
|
||||
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", "1", &immediateClass)
|
||||
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", "1", &immediateClass)
|
||||
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", pvcUnbound, "", "1", &waitClass)
|
||||
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
|
||||
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
|
||||
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
|
||||
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)
|
||||
|
||||
pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
|
||||
pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
|
||||
pvNode1b = makeTestPV("pv-node1b", "node1", "10G", "1", nil, waitClass)
|
||||
pvNode1c = makeTestPV("pv-node1b", "node1", "5G", "1", nil, waitClass)
|
||||
pvNode2 = makeTestPV("pv-node2", "node2", "1G", "1", nil, waitClass)
|
||||
pvPrebound = makeTestPV("pv-prebound", "node1", "1G", "1", unboundPVC, waitClass)
|
||||
pvBound = makeTestPV("pv-bound", "node1", "1G", "1", boundPVC, waitClass)
|
||||
@ -67,8 +72,13 @@ var (
|
||||
binding1aBound = makeBinding(unboundPVC, pvNode1aBound)
|
||||
binding1bBound = makeBinding(unboundPVC2, pvNode1bBound)
|
||||
|
||||
waitClass = "waitClass"
|
||||
immediateClass = "immediateClass"
|
||||
waitClass = "waitClass"
|
||||
immediateClass = "immediateClass"
|
||||
provisionNotSupportClass = "provisionNotSupportedClass"
|
||||
topoMismatchClass = "topoMismatchClass"
|
||||
|
||||
nodeLabelKey = "nodeKey"
|
||||
nodeLabelValue = "node1"
|
||||
)
|
||||
|
||||
type testEnv struct {
|
||||
@ -77,7 +87,7 @@ type testEnv struct {
|
||||
binder SchedulerVolumeBinder
|
||||
internalBinder *volumeBinder
|
||||
internalPVCache *pvAssumeCache
|
||||
internalPVCCache cache.Indexer
|
||||
internalPVCCache *pvcAssumeCache
|
||||
}
|
||||
|
||||
func newTestBinder(t *testing.T) *testEnv {
|
||||
@ -86,27 +96,14 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
|
||||
|
||||
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
|
||||
nodeInformer := informerFactory.Core().V1().Nodes()
|
||||
classInformer := informerFactory.Storage().V1().StorageClasses()
|
||||
|
||||
binder := NewVolumeBinder(
|
||||
client,
|
||||
pvcInformer,
|
||||
informerFactory.Core().V1().PersistentVolumes(),
|
||||
nodeInformer,
|
||||
classInformer)
|
||||
|
||||
// Add a node
|
||||
err := nodeInformer.Informer().GetIndexer().Add(&v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Labels: map[string]string{"key1": "node1"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add node to internal cache: %v", err)
|
||||
}
|
||||
|
||||
// Add storageclasses
|
||||
waitMode := storagev1.VolumeBindingWaitForFirstConsumer
|
||||
immediateMode := storagev1.VolumeBindingImmediate
|
||||
@ -116,6 +113,17 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
Name: waitClass,
|
||||
},
|
||||
VolumeBindingMode: &waitMode,
|
||||
Provisioner: "test-provisioner",
|
||||
AllowedTopologies: []v1.TopologySelectorTerm{
|
||||
{
|
||||
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Values: []string{nodeLabelValue, "reference-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -123,9 +131,33 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
},
|
||||
VolumeBindingMode: &immediateMode,
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: provisionNotSupportClass,
|
||||
},
|
||||
VolumeBindingMode: &waitMode,
|
||||
Provisioner: "kubernetes.io/no-provisioner",
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: topoMismatchClass,
|
||||
},
|
||||
VolumeBindingMode: &waitMode,
|
||||
Provisioner: "test-provisioner",
|
||||
AllowedTopologies: []v1.TopologySelectorTerm{
|
||||
{
|
||||
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Values: []string{"reference-value"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, class := range classes {
|
||||
if err = classInformer.Informer().GetIndexer().Add(class); err != nil {
|
||||
if err := classInformer.Informer().GetIndexer().Add(class); err != nil {
|
||||
t.Fatalf("Failed to add storage class to internal cache: %v", err)
|
||||
}
|
||||
}
|
||||
@ -142,22 +174,31 @@ func newTestBinder(t *testing.T) *testEnv {
|
||||
t.Fatalf("Failed to convert to internal PV cache")
|
||||
}
|
||||
|
||||
pvcCache := internalBinder.pvcCache
|
||||
internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
|
||||
if !ok {
|
||||
t.Fatalf("Failed to convert to internal PVC cache")
|
||||
}
|
||||
|
||||
return &testEnv{
|
||||
client: client,
|
||||
reactor: reactor,
|
||||
binder: binder,
|
||||
internalBinder: internalBinder,
|
||||
internalPVCache: internalPVCache,
|
||||
internalPVCCache: pvcInformer.Informer().GetIndexer(),
|
||||
internalPVCCache: internalPVCCache,
|
||||
}
|
||||
}
|
||||
|
||||
func (env *testEnv) initClaims(t *testing.T, pvcs []*v1.PersistentVolumeClaim) {
|
||||
for _, pvc := range pvcs {
|
||||
err := env.internalPVCCache.Add(pvc)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to add PVC %q to internal cache: %v", pvc.Name, err)
|
||||
func (env *testEnv) initClaims(cachedPVCs []*v1.PersistentVolumeClaim, apiPVCs []*v1.PersistentVolumeClaim) {
|
||||
internalPVCCache := env.internalPVCCache
|
||||
for _, pvc := range cachedPVCs {
|
||||
internalPVCCache.add(pvc)
|
||||
if apiPVCs == nil {
|
||||
env.reactor.claims[pvc.Name] = pvc
|
||||
}
|
||||
}
|
||||
for _, pvc := range apiPVCs {
|
||||
env.reactor.claims[pvc.Name] = pvc
|
||||
}
|
||||
}
|
||||
@ -176,7 +217,7 @@ func (env *testEnv) initVolumes(cachedPVs []*v1.PersistentVolume, apiPVs []*v1.P
|
||||
|
||||
}
|
||||
|
||||
func (env *testEnv) assumeVolumes(t *testing.T, name, node string, pod *v1.Pod, bindings []*bindingInfo) {
|
||||
func (env *testEnv) assumeVolumes(t *testing.T, name, node string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
|
||||
pvCache := env.internalBinder.pvCache
|
||||
for _, binding := range bindings {
|
||||
if err := pvCache.Assume(binding.pv); err != nil {
|
||||
@ -185,23 +226,46 @@ func (env *testEnv) assumeVolumes(t *testing.T, name, node string, pod *v1.Pod,
|
||||
}
|
||||
|
||||
env.internalBinder.podBindingCache.UpdateBindings(pod, node, bindings)
|
||||
|
||||
pvcCache := env.internalBinder.pvcCache
|
||||
for _, pvc := range provisionings {
|
||||
if err := pvcCache.Assume(pvc); err != nil {
|
||||
t.Fatalf("Failed to setup test %q: error: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
env.internalBinder.podBindingCache.UpdateProvisionedPVCs(pod, node, provisionings)
|
||||
}
|
||||
|
||||
func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingInfo) {
|
||||
func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
|
||||
cache := env.internalBinder.podBindingCache
|
||||
cache.UpdateBindings(pod, node, bindings)
|
||||
|
||||
cache.UpdateProvisionedPVCs(pod, node, provisionings)
|
||||
}
|
||||
|
||||
func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo) {
|
||||
func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) {
|
||||
cache := env.internalBinder.podBindingCache
|
||||
bindings := cache.GetBindings(pod, node)
|
||||
|
||||
if !reflect.DeepEqual(expectedBindings, bindings) {
|
||||
t.Errorf("Test %q failed: Expected bindings %+v, got %+v", name, expectedBindings, bindings)
|
||||
}
|
||||
|
||||
provisionedClaims := cache.GetProvisionedPVCs(pod, node)
|
||||
|
||||
if !reflect.DeepEqual(expectedProvisionings, provisionedClaims) {
|
||||
t.Errorf("Test %q failed: Expected provisionings %+v, got %+v", name, expectedProvisionings, provisionedClaims)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo) {
|
||||
func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) []*bindingInfo {
|
||||
cache := env.internalBinder.podBindingCache
|
||||
return cache.GetBindings(pod, node)
|
||||
}
|
||||
|
||||
func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
|
||||
// TODO: Check binding cache
|
||||
|
||||
// Check pv cache
|
||||
@ -223,9 +287,23 @@ func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindi
|
||||
t.Errorf("Test %q failed: expected PV.ClaimRef.Namespace %q, got %q", name, b.pvc.Namespace, pv.Spec.ClaimRef.Namespace)
|
||||
}
|
||||
}
|
||||
|
||||
// Check pvc cache
|
||||
pvcCache := env.internalBinder.pvcCache
|
||||
for _, p := range provisionings {
|
||||
pvcKey := getPVCName(p)
|
||||
pvc, err := pvcCache.GetPVC(pvcKey)
|
||||
if err != nil {
|
||||
t.Errorf("Test %q failed: GetPVC %q returned error: %v", name, pvcKey, err)
|
||||
continue
|
||||
}
|
||||
if pvc.Annotations[annSelectedNode] != nodeLabelValue {
|
||||
t.Errorf("Test %q failed: expected annSelectedNode of pvc %q to be %q, but got %q", name, pvcKey, nodeLabelValue, pvc.Annotations[annSelectedNode])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (env *testEnv) validateFailedAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo) {
|
||||
func (env *testEnv) validateFailedAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
|
||||
// All PVs have been unmodified in cache
|
||||
pvCache := env.internalBinder.pvCache
|
||||
for _, b := range bindings {
|
||||
@ -235,6 +313,20 @@ func (env *testEnv) validateFailedAssume(t *testing.T, name string, pod *v1.Pod,
|
||||
t.Errorf("Test %q failed: PV %q was modified in cache", name, b.pv.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Check pvc cache
|
||||
pvcCache := env.internalBinder.pvcCache
|
||||
for _, p := range provisionings {
|
||||
pvcKey := getPVCName(p)
|
||||
pvc, err := pvcCache.GetPVC(pvcKey)
|
||||
if err != nil {
|
||||
t.Errorf("Test %q failed: GetPVC %q returned error: %v", name, pvcKey, err)
|
||||
continue
|
||||
}
|
||||
if pvc.Annotations[annSelectedNode] != "" {
|
||||
t.Errorf("Test %q failed: expected annSelectedNode of pvc %q empty, but got %q", name, pvcKey, pvc.Annotations[annSelectedNode])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (env *testEnv) validateBind(
|
||||
@ -262,20 +354,46 @@ func (env *testEnv) validateBind(
|
||||
}
|
||||
}
|
||||
|
||||
func (env *testEnv) validateProvision(
|
||||
t *testing.T,
|
||||
name string,
|
||||
pod *v1.Pod,
|
||||
expectedPVCs []*v1.PersistentVolumeClaim,
|
||||
expectedAPIPVCs []*v1.PersistentVolumeClaim) {
|
||||
|
||||
// Check pvc cache
|
||||
pvcCache := env.internalBinder.pvcCache
|
||||
for _, pvc := range expectedPVCs {
|
||||
cachedPVC, err := pvcCache.GetPVC(getPVCName(pvc))
|
||||
if err != nil {
|
||||
t.Errorf("Test %q failed: GetPVC %q returned error: %v", name, getPVCName(pvc), err)
|
||||
}
|
||||
if !reflect.DeepEqual(cachedPVC, pvc) {
|
||||
t.Errorf("Test %q failed: cached PVC check failed [A-expected, B-got]:\n%s", name, diff.ObjectDiff(pvc, cachedPVC))
|
||||
}
|
||||
}
|
||||
|
||||
// Check reactor for API updates
|
||||
if err := env.reactor.checkClaims(expectedAPIPVCs); err != nil {
|
||||
t.Errorf("Test %q failed: API reactor validation failed: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
pvcUnbound = iota
|
||||
pvcPrebound
|
||||
pvcBound
|
||||
)
|
||||
|
||||
func makeTestPVC(name, size string, pvcBoundState int, pvName string, className *string) *v1.PersistentVolumeClaim {
|
||||
func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "testns",
|
||||
UID: types.UID("pvc-uid"),
|
||||
ResourceVersion: "1",
|
||||
ResourceVersion: resourceVersion,
|
||||
SelfLink: testapi.Default.SelfLink("pvc", name),
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
Resources: v1.ResourceRequirements{
|
||||
@ -331,7 +449,7 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
|
||||
},
|
||||
}
|
||||
if node != "" {
|
||||
pv.Spec.NodeAffinity = getVolumeNodeAffinity("key1", node)
|
||||
pv.Spec.NodeAffinity = getVolumeNodeAffinity(nodeLabelKey, node)
|
||||
}
|
||||
|
||||
if boundToPVC != nil {
|
||||
@ -394,18 +512,19 @@ func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *bindin
|
||||
return &bindingInfo{pvc: pvc, pv: pv}
|
||||
}
|
||||
|
||||
func makeStringPtr(str string) *string {
|
||||
s := fmt.Sprintf("%v", str)
|
||||
return &s
|
||||
func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
|
||||
res := pvc.DeepCopy()
|
||||
// Add provision related annotations
|
||||
res.Annotations[annSelectedNode] = nodeLabelValue
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func TestFindPodVolumes(t *testing.T) {
|
||||
func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
// Inputs
|
||||
pvs []*v1.PersistentVolume
|
||||
podPVCs []*v1.PersistentVolumeClaim
|
||||
// Defaults to node1
|
||||
node string
|
||||
// If nil, use pod PVCs
|
||||
cachePVCs []*v1.PersistentVolumeClaim
|
||||
// If nil, makePod with podPVCs
|
||||
@ -454,13 +573,6 @@ func TestFindPodVolumes(t *testing.T) {
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
},
|
||||
"unbound-pvc,node-not-exists": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
node: "node12",
|
||||
expectedUnbound: false,
|
||||
expectedBound: false,
|
||||
shouldFail: true,
|
||||
},
|
||||
"unbound-pvc,pv-same-node": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1b},
|
||||
@ -489,10 +601,11 @@ func TestFindPodVolumes(t *testing.T) {
|
||||
expectedBound: true,
|
||||
},
|
||||
"two-unbound-pvcs,partial-match": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedUnbound: false,
|
||||
expectedBound: true,
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedBindings: []*bindingInfo{binding1a},
|
||||
expectedUnbound: false,
|
||||
expectedBound: true,
|
||||
},
|
||||
"one-bound,one-unbound": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, boundPVC},
|
||||
@ -551,21 +664,27 @@ func TestFindPodVolumes(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
|
||||
testNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "node1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
glog.V(5).Infof("Running test case %q", name)
|
||||
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
testEnv.initVolumes(scenario.pvs, scenario.pvs)
|
||||
if scenario.node == "" {
|
||||
scenario.node = "node1"
|
||||
}
|
||||
|
||||
// a. Init pvc cache
|
||||
if scenario.cachePVCs == nil {
|
||||
scenario.cachePVCs = scenario.podPVCs
|
||||
}
|
||||
testEnv.initClaims(t, scenario.cachePVCs)
|
||||
testEnv.initClaims(scenario.cachePVCs, scenario.cachePVCs)
|
||||
|
||||
// b. Generate pod with given claims
|
||||
if scenario.pod == nil {
|
||||
@ -573,7 +692,7 @@ func TestFindPodVolumes(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute
|
||||
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, scenario.node)
|
||||
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
|
||||
|
||||
// Validate
|
||||
if !scenario.shouldFail && err != nil {
|
||||
@ -588,16 +707,131 @@ func TestFindPodVolumes(t *testing.T) {
|
||||
if unboundSatisfied != scenario.expectedUnbound {
|
||||
t.Errorf("Test %q failed: expected unboundSatsified %v, got %v", name, scenario.expectedUnbound, unboundSatisfied)
|
||||
}
|
||||
testEnv.validatePodCache(t, name, scenario.node, scenario.pod, scenario.expectedBindings)
|
||||
testEnv.validatePodCache(t, name, testNode.Name, scenario.pod, scenario.expectedBindings, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindPodVolumesWithProvisioning(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
// Inputs
|
||||
pvs []*v1.PersistentVolume
|
||||
podPVCs []*v1.PersistentVolumeClaim
|
||||
// If nil, use pod PVCs
|
||||
cachePVCs []*v1.PersistentVolumeClaim
|
||||
// If nil, makePod with podPVCs
|
||||
pod *v1.Pod
|
||||
|
||||
// Expected podBindingCache fields
|
||||
expectedBindings []*bindingInfo
|
||||
expectedProvisions []*v1.PersistentVolumeClaim
|
||||
|
||||
// Expected return values
|
||||
expectedUnbound bool
|
||||
expectedBound bool
|
||||
shouldFail bool
|
||||
}{
|
||||
"one-provisioned": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
},
|
||||
"two-unbound-pvcs,one-matched,one-provisioned": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedBindings: []*bindingInfo{binding1a},
|
||||
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
},
|
||||
"one-bound,one-provisioned": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC},
|
||||
pvs: []*v1.PersistentVolume{pvBound},
|
||||
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
},
|
||||
"immediate-unbound-pvc": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{immediateUnboundPVC},
|
||||
expectedUnbound: false,
|
||||
expectedBound: false,
|
||||
shouldFail: true,
|
||||
},
|
||||
"one-immediate-bound,one-provisioned": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC},
|
||||
pvs: []*v1.PersistentVolume{pvBoundImmediate},
|
||||
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedUnbound: true,
|
||||
expectedBound: true,
|
||||
},
|
||||
"invalid-provisioner": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{noProvisionerPVC},
|
||||
expectedUnbound: false,
|
||||
expectedBound: true,
|
||||
},
|
||||
"volume-topology-unsatisfied": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{topoMismatchPVC},
|
||||
expectedUnbound: false,
|
||||
expectedBound: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Set VolumeScheduling and DynamicProvisioningScheduling feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false")
|
||||
|
||||
testNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "node1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
testEnv.initVolumes(scenario.pvs, scenario.pvs)
|
||||
|
||||
// a. Init pvc cache
|
||||
if scenario.cachePVCs == nil {
|
||||
scenario.cachePVCs = scenario.podPVCs
|
||||
}
|
||||
testEnv.initClaims(scenario.cachePVCs, scenario.cachePVCs)
|
||||
|
||||
// b. Generate pod with given claims
|
||||
if scenario.pod == nil {
|
||||
scenario.pod = makePod(scenario.podPVCs)
|
||||
}
|
||||
|
||||
// Execute
|
||||
unboundSatisfied, boundSatisfied, err := testEnv.binder.FindPodVolumes(scenario.pod, testNode)
|
||||
|
||||
// Validate
|
||||
if !scenario.shouldFail && err != nil {
|
||||
t.Errorf("Test %q failed: returned error: %v", name, err)
|
||||
}
|
||||
if scenario.shouldFail && err == nil {
|
||||
t.Errorf("Test %q failed: returned success but expected error", name)
|
||||
}
|
||||
if boundSatisfied != scenario.expectedBound {
|
||||
t.Errorf("Test %q failed: expected boundSatsified %v, got %v", name, scenario.expectedBound, boundSatisfied)
|
||||
}
|
||||
if unboundSatisfied != scenario.expectedUnbound {
|
||||
t.Errorf("Test %q failed: expected unboundSatsified %v, got %v", name, scenario.expectedUnbound, unboundSatisfied)
|
||||
}
|
||||
testEnv.validatePodCache(t, name, testNode.Name, scenario.pod, scenario.expectedBindings, scenario.expectedProvisions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssumePodVolumes(t *testing.T) {
|
||||
scenarios := map[string]struct {
|
||||
// Inputs
|
||||
podPVCs []*v1.PersistentVolumeClaim
|
||||
pvs []*v1.PersistentVolume
|
||||
bindings []*bindingInfo
|
||||
podPVCs []*v1.PersistentVolumeClaim
|
||||
pvs []*v1.PersistentVolume
|
||||
bindings []*bindingInfo
|
||||
provisionedPVCs []*v1.PersistentVolumeClaim
|
||||
|
||||
// Expected return values
|
||||
shouldFail bool
|
||||
@ -649,6 +883,21 @@ func TestAssumePodVolumes(t *testing.T) {
|
||||
shouldFail: true,
|
||||
expectedBindingRequired: true,
|
||||
},
|
||||
"one-binding, one-pvc-provisioned": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedBindingRequired: true,
|
||||
},
|
||||
"one-binding, one-provision-tmpupdate-failed": {
|
||||
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
|
||||
bindings: []*bindingInfo{binding1a},
|
||||
pvs: []*v1.PersistentVolume{pvNode1a},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
|
||||
shouldFail: true,
|
||||
expectedBindingRequired: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
@ -656,9 +905,9 @@ func TestAssumePodVolumes(t *testing.T) {
|
||||
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
testEnv.initClaims(t, scenario.podPVCs)
|
||||
testEnv.initClaims(scenario.podPVCs, scenario.podPVCs)
|
||||
pod := makePod(scenario.podPVCs)
|
||||
testEnv.initPodCache(pod, "node1", scenario.bindings)
|
||||
testEnv.initPodCache(pod, "node1", scenario.bindings, scenario.provisionedPVCs)
|
||||
testEnv.initVolumes(scenario.pvs, scenario.pvs)
|
||||
|
||||
// Execute
|
||||
@ -681,9 +930,9 @@ func TestAssumePodVolumes(t *testing.T) {
|
||||
scenario.expectedBindings = scenario.bindings
|
||||
}
|
||||
if scenario.shouldFail {
|
||||
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings)
|
||||
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
|
||||
} else {
|
||||
testEnv.validateAssume(t, name, pod, scenario.expectedBindings)
|
||||
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -696,11 +945,20 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
// if nil, use cachedPVs
|
||||
apiPVs []*v1.PersistentVolume
|
||||
|
||||
provisionedPVCs []*v1.PersistentVolumeClaim
|
||||
cachedPVCs []*v1.PersistentVolumeClaim
|
||||
// if nil, use cachedPVCs
|
||||
apiPVCs []*v1.PersistentVolumeClaim
|
||||
|
||||
// Expected return values
|
||||
shouldFail bool
|
||||
expectedPVs []*v1.PersistentVolume
|
||||
// if nil, use expectedPVs
|
||||
expectedAPIPVs []*v1.PersistentVolume
|
||||
|
||||
expectedPVCs []*v1.PersistentVolumeClaim
|
||||
// if nil, use expectedPVCs
|
||||
expectedAPIPVCs []*v1.PersistentVolumeClaim
|
||||
}{
|
||||
"all-bound": {},
|
||||
"not-fully-bound": {
|
||||
@ -724,6 +982,30 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
|
||||
shouldFail: true,
|
||||
},
|
||||
"one-provisioned-pvc": {
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
|
||||
expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
|
||||
},
|
||||
"provision-api-update-failed": {
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2},
|
||||
apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion},
|
||||
expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVC2},
|
||||
expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion},
|
||||
shouldFail: true,
|
||||
},
|
||||
"bingding-succeed, provision-api-update-failed": {
|
||||
bindings: []*bindingInfo{binding1aBound},
|
||||
cachedPVs: []*v1.PersistentVolume{pvNode1a},
|
||||
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
|
||||
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)},
|
||||
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2},
|
||||
apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion},
|
||||
expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVC2},
|
||||
expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion},
|
||||
shouldFail: true,
|
||||
},
|
||||
}
|
||||
for name, scenario := range scenarios {
|
||||
glog.V(5).Infof("Running test case %q", name)
|
||||
@ -734,8 +1016,12 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
if scenario.apiPVs == nil {
|
||||
scenario.apiPVs = scenario.cachedPVs
|
||||
}
|
||||
if scenario.apiPVCs == nil {
|
||||
scenario.apiPVCs = scenario.cachedPVCs
|
||||
}
|
||||
testEnv.initVolumes(scenario.cachedPVs, scenario.apiPVs)
|
||||
testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings)
|
||||
testEnv.initClaims(scenario.cachedPVCs, scenario.apiPVCs)
|
||||
testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings, scenario.provisionedPVCs)
|
||||
|
||||
// Execute
|
||||
err := testEnv.binder.BindPodVolumes(pod)
|
||||
@ -750,6 +1036,76 @@ func TestBindPodVolumes(t *testing.T) {
|
||||
if scenario.expectedAPIPVs == nil {
|
||||
scenario.expectedAPIPVs = scenario.expectedPVs
|
||||
}
|
||||
if scenario.expectedAPIPVCs == nil {
|
||||
scenario.expectedAPIPVCs = scenario.expectedPVCs
|
||||
}
|
||||
testEnv.validateBind(t, name, pod, scenario.expectedPVs, scenario.expectedAPIPVs)
|
||||
testEnv.validateProvision(t, name, pod, scenario.expectedPVCs, scenario.expectedAPIPVCs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindAssumeVolumes(t *testing.T) {
|
||||
// Set feature gate
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
|
||||
// Test case
|
||||
podPVCs := []*v1.PersistentVolumeClaim{unboundPVC}
|
||||
pvs := []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1c}
|
||||
|
||||
// Setup
|
||||
testEnv := newTestBinder(t)
|
||||
testEnv.initVolumes(pvs, pvs)
|
||||
testEnv.initClaims(podPVCs, podPVCs)
|
||||
pod := makePod(podPVCs)
|
||||
|
||||
testNode := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "node1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Execute
|
||||
// 1. Find matching PVs
|
||||
unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode)
|
||||
if err != nil {
|
||||
t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
|
||||
}
|
||||
if !unboundSatisfied {
|
||||
t.Errorf("Test failed: couldn't find PVs for all PVCs")
|
||||
}
|
||||
expectedBindings := testEnv.getPodBindings(t, "before-assume", testNode.Name, pod)
|
||||
|
||||
// 2. Assume matches
|
||||
allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
|
||||
if err != nil {
|
||||
t.Errorf("Test failed: AssumePodVolumes returned error: %v", err)
|
||||
}
|
||||
if allBound {
|
||||
t.Errorf("Test failed: detected unbound volumes as bound")
|
||||
}
|
||||
if !bindingRequired {
|
||||
t.Errorf("Test failed: binding not required")
|
||||
}
|
||||
testEnv.validateAssume(t, "assume", pod, expectedBindings, nil)
|
||||
// After assume, claimref should be set on pv
|
||||
expectedBindings = testEnv.getPodBindings(t, "after-assume", testNode.Name, pod)
|
||||
|
||||
// 3. Find matching PVs again
|
||||
// This should always return the original chosen pv
|
||||
// Run this many times in case sorting returns different orders for the two PVs.
|
||||
t.Logf("Testing FindPodVolumes after Assume")
|
||||
for i := 0; i < 50; i++ {
|
||||
unboundSatisfied, _, err := testEnv.binder.FindPodVolumes(pod, testNode)
|
||||
if err != nil {
|
||||
t.Errorf("Test failed: FindPodVolumes returned error: %v", err)
|
||||
}
|
||||
if !unboundSatisfied {
|
||||
t.Errorf("Test failed: couldn't find PVs for all PVCs")
|
||||
}
|
||||
testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, nil)
|
||||
}
|
||||
}
|
||||
|
20 vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go generated vendored
@ -20,9 +20,11 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/util/io"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
@ -41,6 +43,10 @@ func (ctrl *PersistentVolumeController) GetVolumeDevicePluginDir(pluginName stri
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPodsDir() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
|
||||
return ""
|
||||
}
|
||||
@ -89,18 +95,24 @@ func (ctrl *PersistentVolumeController) GetNodeAllocatable() (v1.ResourceList, e
|
||||
return v1.ResourceList{}, nil
|
||||
}
|
||||
|
||||
func (adc *PersistentVolumeController) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
|
||||
func (ctrl *PersistentVolumeController) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
|
||||
return func(_, _ string) (*v1.Secret, error) {
|
||||
return nil, fmt.Errorf("GetSecret unsupported in PersistentVolumeController")
|
||||
}
|
||||
}
|
||||
|
||||
func (adc *PersistentVolumeController) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
|
||||
func (ctrl *PersistentVolumeController) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
|
||||
return func(_, _ string) (*v1.ConfigMap, error) {
|
||||
return nil, fmt.Errorf("GetConfigMap unsupported in PersistentVolumeController")
|
||||
}
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
|
||||
return func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
|
||||
return nil, fmt.Errorf("GetServiceAccountToken unsupported in PersistentVolumeController")
|
||||
}
|
||||
}
|
||||
|
||||
func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
|
||||
return mount.NewOsExec()
|
||||
}
|
||||
@ -112,3 +124,7 @@ func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, erro
|
||||
func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
|
||||
return ctrl.eventRecorder
|
||||
}
|
||||
|
@ -49,13 +49,17 @@ type Controller struct {
|
||||
podListerSynced cache.InformerSynced
|
||||
|
||||
queue workqueue.RateLimitingInterface
|
||||
|
||||
// allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
|
||||
storageObjectInUseProtectionEnabled bool
|
||||
}
|
||||
|
||||
// NewPVCProtectionController returns a new *PVCProtectionController.
|
||||
func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) *Controller {
|
||||
// NewPVCProtectionController returns a new instance of PVCProtectionController.
|
||||
func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller {
|
||||
e := &Controller{
|
||||
client: cl,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
|
||||
storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
|
||||
}
|
||||
if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
|
||||
metrics.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
|
||||
@ -141,7 +145,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
|
||||
glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime))
|
||||
glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
|
||||
}()
|
||||
|
||||
pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
|
||||
@ -176,6 +180,10 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
|
||||
}
|
||||
|
||||
func (c *Controller) addFinalizer(pvc *v1.PersistentVolumeClaim) error {
|
||||
// Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
|
||||
if !c.storageObjectInUseProtectionEnabled {
|
||||
return nil
|
||||
}
|
||||
claimClone := pvc.DeepCopy()
|
||||
claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
|
||||
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(claimClone)
|
||||
|
@ -162,22 +162,31 @@ func TestPVCProtectionController(t *testing.T) {
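The table-driven cases below set storageObjectInUseProtectionEnabled directly on each scenario. Outside of tests, the new boolean argument to NewPVCProtectionController would typically be derived from the StorageObjectInUseProtection feature gate by the caller; a hypothetical wiring sketch (the newController helper and its import set are assumptions, not part of this diff):

package pvcprotection

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/features"
)

// newController shows how a caller such as kube-controller-manager might pass
// the feature-gate state into the controller's new constructor argument.
func newController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, client clientset.Interface) *Controller {
	enabled := utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection)
	return NewPVCProtectionController(pvcInformer, podInformer, client, enabled)
}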
|
||||
deletedPod *v1.Pod
|
||||
// List of expected kubeclient actions that should happen during the
|
||||
// test.
|
||||
expectedActions []clienttesting.Action
|
||||
expectedActions []clienttesting.Action
|
||||
storageObjectInUseProtectionEnabled bool
|
||||
}{
|
||||
//
|
||||
// PVC events
|
||||
//
|
||||
{
|
||||
name: "PVC without finalizer -> finalizer is added",
|
||||
name: "StorageObjectInUseProtection Enabled, PVC without finalizer -> finalizer is added",
|
||||
updatedPVC: pvc(),
|
||||
expectedActions: []clienttesting.Action{
|
||||
clienttesting.NewUpdateAction(pvcVer, defaultNS, withProtectionFinalizer(pvc())),
|
||||
},
|
||||
storageObjectInUseProtectionEnabled: true,
|
||||
},
|
||||
{
|
||||
name: "PVC with finalizer -> no action",
|
||||
updatedPVC: withProtectionFinalizer(pvc()),
|
||||
expectedActions: []clienttesting.Action{},
|
||||
name: "StorageObjectInUseProtection Disabled, PVC without finalizer -> finalizer is added",
|
||||
updatedPVC: pvc(),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: false,
},
{
name: "PVC with finalizer -> no action",
updatedPVC: withProtectionFinalizer(pvc()),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: true,
},
{
name: "saving PVC finalizer fails -> controller retries",
@ -197,13 +206,23 @@ func TestPVCProtectionController(t *testing.T) {
// This succeeds
clienttesting.NewUpdateAction(pvcVer, defaultNS, withProtectionFinalizer(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted PVC with finalizer -> finalizer is removed",
name: "StorageObjectInUseProtection Enabled, deleted PVC with finalizer -> finalizer is removed",
updatedPVC: deleted(withProtectionFinalizer(pvc())),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "StorageObjectInUseProtection Disabled, deleted PVC with finalizer -> finalizer is removed",
updatedPVC: deleted(withProtectionFinalizer(pvc())),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: false,
},
{
name: "finalizer removal fails -> controller retries",
@ -223,6 +242,7 @@ func TestPVCProtectionController(t *testing.T) {
// Succeeds
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted PVC with finalizer + pods with the PVC exists -> finalizer is not removed",
@ -241,9 +261,10 @@ func TestPVCProtectionController(t *testing.T) {
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted PVC with finalizer + pods with the PVC andis finished -> finalizer is removed",
name: "deleted PVC with finalizer + pods with the PVC and is finished -> finalizer is removed",
initialObjects: []runtime.Object{
withStatus(v1.PodFailed, withPVC(defaultPVCName, pod())),
},
@ -251,6 +272,7 @@ func TestPVCProtectionController(t *testing.T) {
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
//
// Pod events
@ -260,8 +282,9 @@ func TestPVCProtectionController(t *testing.T) {
initialObjects: []runtime.Object{
deleted(withProtectionFinalizer(pvc())),
},
updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
expectedActions: []clienttesting.Action{},
updatedPod: withStatus(v1.PodRunning, withPVC(defaultPVCName, pod())),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: true,
},
{
name: "updated finished Pod -> finalizer is removed",
@ -272,6 +295,7 @@ func TestPVCProtectionController(t *testing.T) {
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "updated unscheduled Pod -> finalizer is removed",
@ -282,6 +306,7 @@ func TestPVCProtectionController(t *testing.T) {
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted running Pod -> finalizer is removed",
@ -292,6 +317,7 @@ func TestPVCProtectionController(t *testing.T) {
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvcVer, defaultNS, deleted(pvc())),
},
storageObjectInUseProtectionEnabled: true,
},
}

@ -330,7 +356,7 @@ func TestPVCProtectionController(t *testing.T) {
}

// Create the controller
ctrl := NewPVCProtectionController(pvcInformer, podInformer, client)
ctrl := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled)

// Start the test by simulating an event
if test.updatedPVC != nil {
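For context on the table above: each case needs a fake clientset seeded with initialObjects, PVC and pod informers, and a controller built with the per-case flag. A minimal sketch of that harness follows; the variable names mirror the test table, but the real test additionally installs reactors for the injected Update failures and waits for the work queue to drain before comparing actions.

client := fake.NewSimpleClientset(test.initialObjects...)        // k8s.io/client-go/kubernetes/fake
informerFactory := informers.NewSharedInformerFactory(client, 0) // k8s.io/client-go/informers
pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
podInformer := informerFactory.Core().V1().Pods()

// The per-case flag is what decides whether addFinalizer issues an Update at all.
ctrl := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled)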
12
vendor/k8s.io/kubernetes/pkg/controller/volume/pvprotection/pv_protection_controller.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/controller/volume/pvprotection/pv_protection_controller.go
generated
vendored
@ -45,13 +45,17 @@ type Controller struct {
pvListerSynced cache.InformerSynced

queue workqueue.RateLimitingInterface

// allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
storageObjectInUseProtectionEnabled bool
}

// NewPVProtectionController returns a new *Controller.
func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) *Controller {
e := &Controller{
client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
}
if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("persistentvolume_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
@ -119,7 +123,7 @@ func (c *Controller) processPV(pvName string) error {
glog.V(4).Infof("Processing PV %s", pvName)
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Now().Sub(startTime))
glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
}()

pv, err := c.pvLister.Get(pvName)
@ -151,6 +155,10 @@ func (c *Controller) processPV(pvName string) error {
}

func (c *Controller) addFinalizer(pv *v1.PersistentVolume) error {
// Skip adding Finalizer in case the StorageObjectInUseProtection feature is not enabled
if !c.storageObjectInUseProtectionEnabled {
return nil
}
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
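Note that only the finalizer-adding path is gated on storageObjectInUseProtectionEnabled; removal is not, which is why the "Disabled" cases in the tests still expect the finalizer to be stripped from deleted objects. A sketch of that ungated inverse is below; the file's actual removeFinalizer may differ in details such as logging and error wrapping.

func (c *Controller) removeFinalizer(pv *v1.PersistentVolume) error {
	pvClone := pv.DeepCopy()
	// Keep every finalizer except the PV protection one.
	newFinalizers := make([]string, 0, len(pvClone.ObjectMeta.Finalizers))
	for _, f := range pvClone.ObjectMeta.Finalizers {
		if f != volumeutil.PVProtectionFinalizer {
			newFinalizers = append(newFinalizers, f)
		}
	}
	pvClone.ObjectMeta.Finalizers = newFinalizers
	_, err := c.client.CoreV1().PersistentVolumes().Update(pvClone)
	return err
}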
@ -111,21 +111,30 @@ func TestPVProtectionController(t *testing.T) {
updatedPV *v1.PersistentVolume
// List of expected kubeclient actions that should happen during the
// test.
expectedActions []clienttesting.Action
expectedActions []clienttesting.Action
storageObjectInUseProtectionEnabled bool
}{
// PV events
//
{
name: "PV without finalizer -> finalizer is added",
name: "StorageObjectInUseProtection Enabled, PV without finalizer -> finalizer is added",
updatedPV: pv(),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "PVC with finalizer -> no action",
updatedPV: withProtectionFinalizer(pv()),
expectedActions: []clienttesting.Action{},
name: "StorageObjectInUseProtection Disabled, PV without finalizer -> finalizer is added",
|
||||
updatedPV: pv(),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: false,
},
{
name: "PVC with finalizer -> no action",
updatedPV: withProtectionFinalizer(pv()),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: true,
},
{
name: "saving PVC finalizer fails -> controller retries",
@ -145,13 +154,23 @@ func TestPVProtectionController(t *testing.T) {
// This succeeds
clienttesting.NewUpdateAction(pvVer, "", withProtectionFinalizer(pv())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted PV with finalizer -> finalizer is removed",
name: "StorageObjectInUseProtection Enabled, deleted PV with finalizer -> finalizer is removed",
updatedPV: deleted(withProtectionFinalizer(pv())),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "StorageObjectInUseProtection Disabled, deleted PV with finalizer -> finalizer is removed",
updatedPV: deleted(withProtectionFinalizer(pv())),
expectedActions: []clienttesting.Action{
clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
},
storageObjectInUseProtectionEnabled: false,
},
{
name: "finalizer removal fails -> controller retries",
@ -171,11 +190,13 @@ func TestPVProtectionController(t *testing.T) {
// Succeeds
clienttesting.NewUpdateAction(pvVer, "", deleted(pv())),
},
storageObjectInUseProtectionEnabled: true,
},
{
name: "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
updatedPV: deleted(withProtectionFinalizer(boundPV())),
expectedActions: []clienttesting.Action{},
name: "deleted PVC with finalizer + PV is bound -> finalizer is not removed",
updatedPV: deleted(withProtectionFinalizer(boundPV())),
expectedActions: []clienttesting.Action{},
storageObjectInUseProtectionEnabled: true,
},
}

@ -209,7 +230,7 @@ func TestPVProtectionController(t *testing.T) {
}

// Create the controller
ctrl := NewPVProtectionController(pvInformer, client)
ctrl := NewPVProtectionController(pvInformer, client, test.storageObjectInUseProtectionEnabled)

// Start the test by simulating an event
if test.updatedPV != nil {
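Finally, a hedged sketch of how each case's expectedActions would typically be checked against the fake client once the simulated event has been processed. The real test's comparison logic is assumed here and may well be stricter; this fragment only illustrates the general pattern.

// Compare what the controller actually did against the expectation table.
actions := client.Actions() // every API call recorded by the fake clientset
for i, action := range actions {
	if i >= len(test.expectedActions) {
		t.Errorf("unexpected extra action: %+v", action)
		continue
	}
	if !reflect.DeepEqual(test.expectedActions[i], action) {
		t.Errorf("action %d differs:\nexpected: %+v\ngot:      %+v", i, test.expectedActions[i], action)
	}
}
if len(test.expectedActions) > len(actions) {
	t.Errorf("missing %d expected action(s)", len(test.expectedActions)-len(actions))
}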