Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions


@@ -13,6 +13,7 @@ go_library(
"pv_controller.go",
"pv_controller_base.go",
"scheduler_assume_cache.go",
"scheduler_bind_cache_metrics.go",
"scheduler_binder.go",
"scheduler_binder_cache.go",
"scheduler_binder_fake.go",
@@ -21,43 +22,44 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/events:go_default_library",
"//pkg/controller/volume/persistentvolume/metrics:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/goroutinemap:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/api/authentication/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -68,6 +70,7 @@ go_test(
"delete_test.go",
"framework_test.go",
"index_test.go",
"main_test.go",
"provision_test.go",
"pv_controller_test.go",
"recycle_test.go",
@@ -80,31 +83,33 @@ go_test(
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@@ -3,3 +3,9 @@ approvers:
- saad-ali
- thockin
- msau42 # for volume scheduling
reviewers:
- jsafrane
- saad-ali
- thockin
- msau42 # for volume scheduling
- lichuqiang # for volume scheduling


@@ -23,6 +23,8 @@ import (
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
)
// Test single call to syncClaim and syncVolume methods.
@@ -35,8 +37,6 @@ func TestSync(t *testing.T) {
"foo": "true",
"bar": "false",
}
modeBlock := v1.PersistentVolumeBlock
modeFile := v1.PersistentVolumeFilesystem
tests := []controllerTest{
// [Unit test set 1] User did not care which PV they get.
@@ -45,7 +45,7 @@ func TestSync(t *testing.T) {
{
// syncClaim binds to a matching unbound volume.
"1-1 - successful bind",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
@@ -54,8 +54,8 @@ func TestSync(t *testing.T) {
{
// syncClaim does not do anything when there is no matching volume.
"1-2 - noop",
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
@@ -65,8 +65,8 @@ func TestSync(t *testing.T) {
// syncClaim resets claim.Status to Pending when there is no
// matching volume.
"1-3 - reset to Pending",
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimBound, nil),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
@@ -76,11 +76,11 @@ func TestSync(t *testing.T) {
// syncClaim binds claims to the smallest matching volume
"1-4 - smallest volume",
[]*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
},
newClaimArray("claim1-4", "uid1-4", "1Gi", "", v1.ClaimPending, nil),
@@ -92,12 +92,12 @@ func TestSync(t *testing.T) {
// name), even though a smaller one is available.
"1-5 - prebound volume by name - success",
[]*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-5", "uid1-5", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -108,12 +108,12 @@ func TestSync(t *testing.T) {
// UID), even though a smaller one is available.
"1-6 - prebound volume by UID - success",
[]*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-6", "uid1-6", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -123,8 +123,8 @@ func TestSync(t *testing.T) {
// syncClaim does not bind claim to a volume prebound to a claim with
// same name and different UID
"1-7 - prebound volume to different claim",
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
@@ -134,7 +134,7 @@ func TestSync(t *testing.T) {
// syncClaim completes binding - simulates controller crash after
// PV.ClaimRef is saved
"1-8 - complete bind after crash - PV bound",
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-8", "uid1-8", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
@@ -163,7 +163,7 @@ func TestSync(t *testing.T) {
{
// syncClaim binds a claim only when the label selector matches the volume
"1-11 - bind when selector matches",
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -172,8 +172,8 @@ func TestSync(t *testing.T) {
{
// syncClaim does not bind a claim when the label selector doesn't match
"1-12 - do not bind when selector does not match",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
@@ -182,8 +182,8 @@ func TestSync(t *testing.T) {
{
// syncClaim does not do anything when binding is delayed
"1-13 - delayed binding",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
[]string{"Normal WaitForFirstConsumer"},
@@ -192,7 +192,7 @@ func TestSync(t *testing.T) {
{
// syncClaim binds when binding is delayed but PV is prebound to PVC
"1-14 - successful prebound PV",
newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, &classWait, annBoundByController, annBindCompleted),
@@ -203,12 +203,12 @@ func TestSync(t *testing.T) {
// even if there is smaller volume available
"1-15 - successful prebound PVC",
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "uid1-15", "claim1-15", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimBound, nil, annBindCompleted)),
@@ -218,12 +218,35 @@ func TestSync(t *testing.T) {
// syncClaim does not bind pre-bound PVC to PV with different AccessMode
"1-16 - successful prebound PVC",
// PV has ReadWriteOnce
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind PVC to non-available PV if it's not pre-bind
"1-17 - skip non-available PV if it's not pre-bind",
[]*v1.PersistentVolume{
newVolume("volume1-17-pending", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-failed", "1Gi", "", "", v1.VolumeFailed, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-released", "1Gi", "", "", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-empty", "1Gi", "", "", "", v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-17-pending", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-failed", "1Gi", "", "", v1.VolumeFailed, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-released", "1Gi", "", "", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-17-empty", "1Gi", "", "", "", v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolumeClaim{
newClaim("claim1-17", "uid1-17", "1Gi", "", v1.ClaimPending, nil),
},
[]*v1.PersistentVolumeClaim{
newClaim("claim1-17", "uid1-17", "1Gi", "", v1.ClaimPending, nil),
},
noevents, noerrors, testSyncClaim,
},
// [Unit test set 2] User asked for a specific PV.
// Test the binding when pv.ClaimRef is already set by controller or
@@ -251,7 +274,7 @@ func TestSync(t *testing.T) {
// syncClaim with claim pre-bound to a PV that exists and is
// unbound. Check it gets bound and no annBoundByController is set.
"2-3 - claim prebound to unbound volume",
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending, nil),
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, nil, annBindCompleted),
@@ -261,7 +284,7 @@ func TestSync(t *testing.T) {
// claim with claim pre-bound to a PV that is pre-bound to the claim
// by name. Check it gets bound and no annBoundByController is set.
"2-4 - claim prebound to prebound volume by name",
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimPending, nil),
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimBound, nil, annBindCompleted),
@@ -272,7 +295,7 @@ func TestSync(t *testing.T) {
// claim by UID. Check it gets bound and no annBoundByController is
// set.
"2-5 - claim prebound to prebound volume by UID",
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimPending, nil),
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimBound, nil, annBindCompleted),
@@ -303,7 +326,7 @@ func TestSync(t *testing.T) {
// unbound, but does not match the selector. Check it gets bound
// and no annBoundByController is set.
"2-8 - claim prebound to unbound volume that does not match the selector",
newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-8", "1Gi", "uid2-8", "claim2-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimBound, nil, annBindCompleted)),
@@ -314,8 +337,8 @@ func TestSync(t *testing.T) {
// unbound, but its size is smaller than requested.
//Check that the claim status is reset to Pending
"2-9 - claim prebound to unbound volume that size is smaller than requested",
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimBound, nil),
newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimPending, nil),
[]string{"Warning VolumeMismatch"}, noerrors, testSyncClaim,
@@ -324,8 +347,8 @@ func TestSync(t *testing.T) {
// syncClaim with claim pre-bound to a PV that exists and is
// unbound, but its class does not match. Check that the claim status is reset to Pending
"2-10 - claim prebound to unbound volume that class is different",
newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimBound, nil),
newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimPending, nil),
[]string{"Warning VolumeMismatch"}, noerrors, testSyncClaim,
@@ -356,7 +379,7 @@ func TestSync(t *testing.T) {
// syncClaim with claim bound to unbound volume. Check it's bound.
// Also check that Pending phase is set to Bound
"3-3 - bound claim with unbound volume",
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
@@ -366,8 +389,8 @@ func TestSync(t *testing.T) {
// syncClaim with claim bound to volume with missing (or different)
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
"3-4 - bound claim with prebound volume",
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimLost, nil, annBoundByController, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
@@ -377,7 +400,7 @@ func TestSync(t *testing.T) {
// controller does not do anything. Also check that Pending phase is
// set to Bound
"3-5 - bound claim with bound volume",
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimPending, nil, annBindCompleted),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimBound, nil, annBindCompleted),
@@ -388,8 +411,8 @@ func TestSync(t *testing.T) {
// claim. Check that the claim is marked as lost.
// TODO: test that an event is emitted
"3-6 - bound claim with bound volume",
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimPending, nil, annBindCompleted),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimLost, nil, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
@@ -399,7 +422,7 @@ func TestSync(t *testing.T) {
// even if the claim's selector doesn't match the volume. Also
// check that Pending phase is set to Bound
"3-7 - bound claim with unbound volume where selector doesn't match",
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted)),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -409,7 +432,7 @@ func TestSync(t *testing.T) {
{
// syncVolume with pending volume. Check it's marked as Available.
"4-1 - pending volume",
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
noclaims,
noclaims,
@@ -419,7 +442,7 @@ func TestSync(t *testing.T) {
// syncVolume with prebound pending volume. Check it's marked as
// Available.
"4-2 - pending prebound volume",
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
noclaims,
noclaims,
@@ -503,11 +526,11 @@ func TestSync(t *testing.T) {
// smaller PV available
"13-1 - binding to class",
[]*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-1-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-1-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
},
[]*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
},
newClaimArray("claim13-1", "uid13-1", "1Gi", "", v1.ClaimPending, &classGold),
@@ -519,11 +542,11 @@ func TestSync(t *testing.T) {
// smaller PV with a class available
"13-2 - binding without a class",
[]*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-2-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-2-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
},
newClaimArray("claim13-2", "uid13-2", "1Gi", "", v1.ClaimPending, nil),
@@ -535,11 +558,11 @@ func TestSync(t *testing.T) {
// smaller PV with different class available
"13-3 - binding to specific a class",
[]*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver),
newVolume("volume13-3-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classSilver),
newVolume("volume13-3-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
},
[]*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver),
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classSilver),
newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
},
newClaimArray("claim13-3", "uid13-3", "1Gi", "", v1.ClaimPending, &classGold),
@@ -550,7 +573,7 @@ func TestSync(t *testing.T) {
// syncVolume binds claim requesting class "" to claim to PV with
// class=""
"13-4 - empty class",
newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim13-4", "uid13-4", "1Gi", "", v1.ClaimPending, &classEmpty),
newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", v1.ClaimBound, &classEmpty, annBoundByController, annBindCompleted),
@@ -560,65 +583,14 @@ func TestSync(t *testing.T) {
// syncVolume binds claim requesting class nil to claim to PV with
// class = ""
"13-5 - nil class",
newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim13-5", "uid13-5", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
// All of these should bind as feature set is not enabled for BlockVolume
// meaning volumeMode will be ignored and dropped
{
// syncVolume binds a requested block claim to a block volume
"14-1 - binding to volumeMode block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to a filesystem volume
"14-2 - binding to volumeMode filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds an unspecified volumemode for claim to a specified filesystem volume
"14-3 - binding to volumeMode filesystem using default for claim",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-4 - binding to unspecified volumeMode using requested filesystem for claim",
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-5 - binding different volumeModes should be ignored",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
}
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
runSyncTests(t, tests, []*storage.StorageClass{
{
ObjectMeta: metav1.ObjectMeta{Name: classWait},
@@ -627,7 +599,69 @@ func TestSync(t *testing.T) {
}, []*v1.Pod{})
}
func TestSyncAlphaBlockVolume(t *testing.T) {
func TestSyncBlockVolumeDisabled(t *testing.T) {
modeBlock := v1.PersistentVolumeBlock
modeFile := v1.PersistentVolumeFilesystem
// All of these should bind as feature set is not enabled for BlockVolume
// meaning volumeMode will be ignored and dropped
tests := []controllerTest{
{
// syncVolume binds a requested block claim to a block volume
"14-1 - binding to volumeMode block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to a filesystem volume
"14-2 - binding to volumeMode filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds an unspecified volumemode for claim to a specified filesystem volume
"14-3 - binding to volumeMode filesystem using default for claim",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-4 - binding to unspecified volumeMode using requested filesystem for claim",
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-5 - binding different volumeModes should be ignored",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
}
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, false)()
runSyncTests(t, tests, []*storage.StorageClass{
{
ObjectMeta: metav1.ObjectMeta{Name: classWait},
VolumeBindingMode: &modeWait,
},
}, []*v1.Pod{})
}
func TestSyncBlockVolume(t *testing.T) {
modeBlock := v1.PersistentVolumeBlock
modeFile := v1.PersistentVolumeFilesystem
@@ -637,7 +671,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume binds a requested block claim to a block volume
"14-1 - binding to volumeMode block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -646,7 +680,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume binds a requested filesystem claim to a filesystem volume
"14-2 - binding to volumeMode filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -655,8 +689,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind to an unspecified volumemode for claim to a specified filesystem volume
"14-3 - do not bind pv volumeMode filesystem and pvc volumeMode block",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
@@ -665,8 +699,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind a requested filesystem claim to an unspecified volumeMode for volume
"14-4 - do not bind pv volumeMode block and pvc volumeMode filesystem",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
@@ -675,8 +709,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind when matching class but not matching volumeModes
"14-5 - do not bind when matching class but not volumeMode",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
[]string{"Warning ProvisioningFailed"},
@@ -685,8 +719,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind when matching volumeModes but class does not match
"14-5-1 - do not bind when matching volumeModes but class does not match",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
[]string{"Warning ProvisioningFailed"},
@@ -695,8 +729,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind when pvc is prebound to pv with matching volumeModes but class does not match
"14-5-2 - do not bind when pvc is prebound to pv with matching volumeModes but class does not match",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
[]string{"Warning VolumeMismatch"},
@@ -705,7 +739,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume bind when pv is prebound and volumeModes match
"14-7 - bind when pv volume is prebound and volumeModes match",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "uid14-7", "claim14-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "volume14-7", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@@ -714,8 +748,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
"14-8 - do not bind when pvc is prebound to pv with mismatching volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
@@ -724,8 +758,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
"14-8-1 - do not bind when pvc is prebound to pv with mismatching volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
@@ -734,7 +768,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume binds when pvc is prebound to pv with matching volumeModes block
"14-9 - bind when pvc is prebound to pv with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "uid14-9", "claim14-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimBound, nil, annBindCompleted)),
@ -743,7 +777,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume binds when pv is prebound to pvc with matching volumeModes block
"14-10 - bind when pv is prebound to pvc with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "uid14-10", "claim14-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "volume14-10", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@ -752,7 +786,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume binds when pvc is prebound to pv with matching volumeModes filesystem
"14-11 - bind when pvc is prebound to pv with matching volumeModes filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "uid14-11", "claim14-11", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimBound, nil, annBindCompleted)),
@ -761,7 +795,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume binds when pv is prebound to pvc with matching volumeModes filesystem
"14-12 - bind when pv is prebound to pvc with matching volumeModes filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "uid14-12", "claim14-12", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "volume14-12", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
@ -770,8 +804,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume outputs a warning when pv is prebound to pvc with mismatching volumeMode
"14-13 - output warning when pv is prebound to pvc with different volumeModes",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
@ -780,8 +814,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume outputs a warning when pv is prebound to pvc with mismatching volumeMode
"14-13-1 - output warning when pv is prebound to pvc with different volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
@ -790,8 +824,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume waits for syncClaim without warning when pv is prebound to pvc with matching volumeMode block
"14-14 - wait for syncClaim without warning when pv is prebound to pvc with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
noevents, noerrors, testSyncVolume,
@ -799,20 +833,15 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
{
// syncVolume waits for syncClaim without warning when pv is prebound to pvc with matching volumeMode file
"14-14-1 - wait for syncClaim without warning when pv is prebound to pvc with matching volumeModes file",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
noevents, noerrors, testSyncVolume,
},
}
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
if err != nil {
t.Errorf("Failed to enable feature gate for BlockVolume: %v", err)
return
}
defer utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
}
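The removed lines above toggled the gate by hand (Set("BlockVolume=true") plus a deferred Set back to false) and had to check the error themselves; the new helper collapses this into one call that flips the gate and returns a restore func. A minimal usage sketch, assuming the helper's signature at this revision (the test name and body are illustrative):

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithBlockVolumeEnabled(t *testing.T) {
	// Enable the gate for this test only; invoking the returned func
	// (here via defer) restores the previous value when the test ends.
	defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
	// ... exercise code paths that require the BlockVolume gate ...
}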

View File

@ -27,7 +27,7 @@ import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
@ -48,6 +49,7 @@ import (
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/features"
vol "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
)
@ -159,7 +161,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.lock.Lock()
defer r.lock.Unlock()
glog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
// Inject error when requested
err = r.injectReactError(action)
@ -179,11 +181,17 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, nil, fmt.Errorf("Cannot create volume %s: volume already exists", volume.Name)
}
// mimic apiserver defaulting
if volume.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volume.Spec.VolumeMode = new(v1.PersistentVolumeMode)
*volume.Spec.VolumeMode = v1.PersistentVolumeFilesystem
}
// Store the updated object to appropriate places.
r.volumes[volume.Name] = volume
r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++
glog.V(4).Infof("created volume %s", volume.Name)
klog.V(4).Infof("created volume %s", volume.Name)
return true, volume, nil
case action.Matches("update", "persistentvolumes"):
@ -209,7 +217,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.volumes[volume.Name] = volume
r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++
glog.V(4).Infof("saved updated volume %s", volume.Name)
klog.V(4).Infof("saved updated volume %s", volume.Name)
return true, volume, nil
case action.Matches("update", "persistentvolumeclaims"):
@ -235,23 +243,23 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.claims[claim.Name] = claim
r.changedObjects = append(r.changedObjects, claim)
r.changedSinceLastSync++
glog.V(4).Infof("saved updated claim %s", claim.Name)
klog.V(4).Infof("saved updated claim %s", claim.Name)
return true, claim, nil
case action.Matches("get", "persistentvolumes"):
name := action.(core.GetAction).GetName()
volume, found := r.volumes[name]
if found {
glog.V(4).Infof("GetVolume: found %s", volume.Name)
klog.V(4).Infof("GetVolume: found %s", volume.Name)
return true, volume, nil
} else {
glog.V(4).Infof("GetVolume: volume %s not found", name)
klog.V(4).Infof("GetVolume: volume %s not found", name)
return true, nil, fmt.Errorf("Cannot find volume %s", name)
}
case action.Matches("delete", "persistentvolumes"):
name := action.(core.DeleteAction).GetName()
glog.V(4).Infof("deleted volume %s", name)
klog.V(4).Infof("deleted volume %s", name)
_, found := r.volumes[name]
if found {
delete(r.volumes, name)
@ -263,7 +271,7 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
case action.Matches("delete", "persistentvolumeclaims"):
name := action.(core.DeleteAction).GetName()
glog.V(4).Infof("deleted claim %s", name)
klog.V(4).Infof("deleted claim %s", name)
_, found := r.claims[name]
if found {
delete(r.claims, name)
@ -286,11 +294,11 @@ func (r *volumeReactor) injectReactError(action core.Action) error {
}
for i, expected := range r.errors {
glog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
klog.V(4).Infof("trying to match %q %q with %q %q", expected.verb, expected.resource, action.GetVerb(), action.GetResource())
if action.Matches(expected.verb, expected.resource) {
// That's the action we're waiting for, remove it from injectedErrors
r.errors = append(r.errors[:i], r.errors[i+1:]...)
glog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.verb, expected.resource, expected.error)
return expected.error
}
}
@ -379,14 +387,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
select {
case event, ok := <-fakeRecorder.Events:
if ok {
glog.V(5).Infof("event recorder got event %s", event)
klog.V(5).Infof("event recorder got event %s", event)
gotEvents = append(gotEvents, event)
} else {
glog.V(5).Infof("event recorder finished")
klog.V(5).Infof("event recorder finished")
finished = true
}
case <-timer.C:
glog.V(5).Infof("event recorder timeout")
klog.V(5).Infof("event recorder timeout")
finished = true
}
}
@ -426,10 +434,10 @@ func (r *volumeReactor) popChange() interface{} {
switch obj.(type) {
case *v1.PersistentVolume:
vol, _ := obj.(*v1.PersistentVolume)
glog.V(4).Infof("reactor queue: %s", vol.Name)
klog.V(4).Infof("reactor queue: %s", vol.Name)
case *v1.PersistentVolumeClaim:
claim, _ := obj.(*v1.PersistentVolumeClaim)
glog.V(4).Infof("reactor queue: %s", claim.Name)
klog.V(4).Infof("reactor queue: %s", claim.Name)
}
}
@ -630,6 +638,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
// newVolume returns a new volume with given attributes
func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy, class string, annotations ...string) *v1.PersistentVolume {
fs := v1.PersistentVolumeFilesystem
volume := v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -645,6 +654,7 @@ func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase v
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
PersistentVolumeReclaimPolicy: reclaimPolicy,
StorageClassName: class,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: phase,
@ -740,6 +750,7 @@ func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, ph
// newClaim returns a new claim with given attributes
func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.PersistentVolumeClaimPhase, class *string, annotations ...string) *v1.PersistentVolumeClaim {
fs := v1.PersistentVolumeFilesystem
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -756,6 +767,7 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.Persisten
},
VolumeName: boundToVolume,
StorageClassName: class,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: phase,
@ -898,7 +910,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c
// Inject a hook before async operation starts
ctrl.preOperationHook = func(operationName string) {
// Inside the hook, run the function to inject
glog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
klog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
injectBeforeOperation(ctrl, reactor)
}
@ -945,7 +957,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto
// 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
klog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
@ -1008,7 +1020,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
// A limit on the number of calls is enforced to prevent endless loops.
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
for _, test := range tests {
glog.V(4).Infof("starting multisync test %q", test.name)
klog.V(4).Infof("starting multisync test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
@ -1046,7 +1058,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
counter := 0
for {
counter++
glog.V(4).Infof("test %q: iteration %d", test.name, counter)
klog.V(4).Infof("test %q: iteration %d", test.name, counter)
if counter > 100 {
t.Errorf("Test %q failed: too many iterations", test.name)
@ -1064,7 +1076,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
// Simulate "periodic sync" of everything (until it produces
// no changes).
firstSync = false
glog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
reactor.syncAll()
} else {
// Last sync did not produce any updates, the test reached
@ -1085,7 +1097,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil {
if err == versionConflictError {
// Ignore version errors
glog.V(4).Infof("test intentionaly ignores version error.")
klog.V(4).Infof("test intentionaly ignores version error.")
} else {
t.Errorf("Error calling syncClaim: %v", err)
// Finish the loop on the first error
@ -1102,7 +1114,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil {
if err == versionConflictError {
// Ignore version errors
glog.V(4).Infof("test intentionaly ignores version error.")
klog.V(4).Infof("test intentionaly ignores version error.")
} else {
t.Errorf("Error calling syncVolume: %v", err)
// Finish the loop on the first error
@ -1114,7 +1126,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
}
}
evaluateTestResults(ctrl, reactor, test, t)
glog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
klog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
}
}
@ -1185,7 +1197,7 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol
func (plugin *mockVolumePlugin) NewProvisioner(options vol.VolumeOptions) (vol.Provisioner, error) {
if len(plugin.provisionCalls) > 0 {
// mockVolumePlugin directly implements Provisioner interface
glog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
plugin.provisionOptions = options
return plugin, nil
} else {
@ -1201,7 +1213,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
var pv *v1.PersistentVolume
call := plugin.provisionCalls[plugin.provisionCallCounter]
if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
glog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
}
if call.ret == nil {
@ -1222,11 +1234,15 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
pv.Spec.VolumeMode = plugin.provisionOptions.PVC.Spec.VolumeMode
}
plugin.provisionCallCounter++
glog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
klog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
return pv, call.ret
}
@ -1235,7 +1251,7 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
func (plugin *mockVolumePlugin) NewDeleter(spec *vol.Spec) (vol.Deleter, error) {
if len(plugin.deleteCalls) > 0 {
// mockVolumePlugin directly implements Deleter interface
glog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
return plugin, nil
} else {
return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured")
@ -1248,7 +1264,7 @@ func (plugin *mockVolumePlugin) Delete() error {
}
ret := plugin.deleteCalls[plugin.deleteCallCounter]
plugin.deleteCallCounter++
glog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
return ret
}
@ -1274,6 +1290,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventReco
}
ret := plugin.recycleCalls[plugin.recycleCallCounter]
plugin.recycleCallCounter++
glog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
return ret
}

View File

@ -158,13 +158,13 @@ func findMatchingVolume(
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
// check if volumeModes do not match (Alpha and feature gate protected)
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
// check if volumeModes do not match (feature gate protected)
isMismatch, err := checkVolumeModeMismatches(&claim.Spec, &volume.Spec)
if err != nil {
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
}
// filter out mismatching volumeModes
if isMisMatch {
if isMismatch {
continue
}
@ -211,11 +211,18 @@ func findMatchingVolume(
}
// filter out:
// - volumes in non-available phase
// - volumes bound to another claim
// - volumes whose labels don't match the claim's selector, if specified
// - volumes in a Class that was not requested
// - volumes whose NodeAffinity does not match the node
if volume.Spec.ClaimRef != nil {
if volume.Status.Phase != v1.VolumeAvailable {
// We ignore volumes in a non-available phase, because volumes that
// satisfy the matching criteria will be updated to Available; binding
// them now has a high chance of encountering unnecessary failures
// due to API conflicts.
continue
} else if volume.Spec.ClaimRef != nil {
continue
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
continue
@ -251,25 +258,24 @@ func findMatchingVolume(
return nil, nil
}
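The practical effect of the new phase filter, sketched with a hypothetical volume (field values illustrative):

pv := &v1.PersistentVolume{
	Status: v1.PersistentVolumeStatus{Phase: v1.VolumeReleased},
}
// Even if capacity, class, selector, and node affinity all match,
// findMatchingVolume now skips pv: Pending, Failed, and Released
// volumes are all filtered out along with Bound ones.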
// checkVolumeModeMisMatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims along with making sure that the Alpha feature gate BlockVolume is
// enabled.
// This is Alpha and could change in the future.
func checkVolumeModeMisMatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
if pvSpec.VolumeMode != nil && pvcSpec.VolumeMode != nil {
requestedVolumeMode := *pvcSpec.VolumeMode
pvVolumeMode := *pvSpec.VolumeMode
return requestedVolumeMode != pvVolumeMode, nil
} else {
// This should also return an error; it means that
// the defaulting has failed.
return true, fmt.Errorf("api defaulting for volumeMode failed")
}
} else {
// feature gate is disabled
// checkVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims
func checkVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
return false, nil
}
// In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager.
// So we default a nil volumeMode to filesystem
requestedVolumeMode := v1.PersistentVolumeFilesystem
if pvcSpec.VolumeMode != nil {
requestedVolumeMode = *pvcSpec.VolumeMode
}
pvVolumeMode := v1.PersistentVolumeFilesystem
if pvSpec.VolumeMode != nil {
pvVolumeMode = *pvSpec.VolumeMode
}
return requestedVolumeMode != pvVolumeMode, nil
}
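Since a nil mode now defaults to filesystem on both sides, only an explicit Block on exactly one side reports a mismatch once the gate is enabled. A quick illustration with hypothetical specs (assuming the BlockVolume gate is on):

block := v1.PersistentVolumeBlock
pvcSpec := v1.PersistentVolumeClaimSpec{VolumeMode: nil} // treated as Filesystem
pvSpec := v1.PersistentVolumeSpec{VolumeMode: &block}
mismatch, err := checkVolumeModeMismatches(&pvcSpec, &pvSpec)
// mismatch == true, err == nil; with both modes nil (or both Filesystem)
// the call would report no mismatch.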
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage

View File

@ -24,13 +24,16 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/kubernetes/scheme"
ref "k8s.io/client-go/tools/reference"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume/util"
)
func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim {
fs := v1.PersistentVolumeFilesystem
pvc := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claim01",
@ -43,6 +46,7 @@ func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentV
v1.ResourceName(v1.ResourceStorage): resource.MustParse(size),
},
},
VolumeMode: &fs,
},
}
if modfn != nil {
@ -195,6 +199,7 @@ func TestMatchVolume(t *testing.T) {
}
func TestMatchingWithBoundVolumes(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
volumeIndex := newPersistentVolumeOrderedIndex()
// two similar volumes, one is bound
pv1 := &v1.PersistentVolume{
@ -211,7 +216,11 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
// this one we're pretending is already bound
ClaimRef: &v1.ObjectReference{UID: "abc123"},
ClaimRef: &v1.ObjectReference{UID: "abc123"},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeBound,
},
}
@ -228,6 +237,10 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
@ -246,6 +259,7 @@ func TestMatchingWithBoundVolumes(t *testing.T) {
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G"),
},
},
VolumeMode: &fs,
},
}
@ -320,6 +334,7 @@ func TestAllPossibleAccessModes(t *testing.T) {
}
func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
gce := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "gce"},
Spec: v1.PersistentVolumeSpec{
@ -329,6 +344,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
@ -340,6 +359,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
@ -353,6 +376,10 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
v1.ReadOnlyMany,
v1.ReadWriteMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
@ -364,6 +391,7 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
VolumeMode: &fs,
},
}
@ -423,6 +451,7 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
}
func createTestVolumes() []*v1.PersistentVolume {
fs := v1.PersistentVolumeFilesystem
// these volumes are deliberately out-of-order to test indexing and sorting
return []*v1.PersistentVolume{
{
@ -441,6 +470,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -460,7 +493,11 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadOnlyMany,
},
// this one we're pretending is already bound
ClaimRef: &v1.ObjectReference{UID: "def456"},
ClaimRef: &v1.ObjectReference{UID: "def456"},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeBound,
},
},
{
@ -473,13 +510,17 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ResourceName(v1.ResourceStorage): resource.MustParse("5G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{},
Glusterfs: &v1.GlusterfsPersistentVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -499,7 +540,11 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadOnlyMany,
},
// this one we're pretending is already bound
ClaimRef: &v1.ObjectReference{UID: "abc123"},
ClaimRef: &v1.ObjectReference{UID: "abc123"},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeBound,
},
},
{
@ -512,13 +557,17 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{},
Glusterfs: &v1.GlusterfsPersistentVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -537,6 +586,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -549,13 +602,17 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{},
Glusterfs: &v1.GlusterfsPersistentVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
v1.ReadWriteMany,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -576,6 +633,10 @@ func createTestVolumes() []*v1.PersistentVolume {
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -597,6 +658,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteOnce,
},
StorageClassName: classSilver,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -615,6 +680,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteOnce,
},
StorageClassName: classSilver,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -633,6 +702,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteOnce,
},
StorageClassName: classGold,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -653,6 +726,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteMany,
},
StorageClassName: classLarge,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -673,6 +750,10 @@ func createTestVolumes() []*v1.PersistentVolume {
v1.ReadWriteMany,
},
StorageClassName: classLarge,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -693,6 +774,10 @@ func createTestVolumes() []*v1.PersistentVolume {
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -713,6 +798,10 @@ func createTestVolumes() []*v1.PersistentVolume {
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -734,6 +823,10 @@ func createTestVolumes() []*v1.PersistentVolume {
StorageClassName: classWait,
ClaimRef: &v1.ObjectReference{Name: "claim02", Namespace: "myns"},
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
@ -754,12 +847,110 @@ func createTestVolumes() []*v1.PersistentVolume {
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value3"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "affinity-pv4-pending",
Name: "affinity004-pending",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumePending,
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "affinity-pv4-failed",
Name: "affinity004-failed",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeFailed,
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "affinity-pv4-released",
Name: "affinity004-released",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeReleased,
},
},
{
ObjectMeta: metav1.ObjectMeta{
UID: "affinity-pv4-empty",
Name: "affinity004-empty",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
StorageClassName: classWait,
NodeAffinity: getVolumeNodeAffinity("key1", "value4"),
VolumeMode: &fs,
},
},
}
}
func testVolume(name, size string) *v1.PersistentVolume {
fs := v1.PersistentVolumeFilesystem
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -769,6 +960,10 @@ func testVolume(name, size string) *v1.PersistentVolume {
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(size)},
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
}
@ -811,6 +1006,9 @@ func createVolumeModeBlockTestVolume() *v1.PersistentVolume {
},
VolumeMode: &blockMode,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
}
@ -834,6 +1032,32 @@ func createVolumeModeFilesystemTestVolume() *v1.PersistentVolume {
},
VolumeMode: &filesystemMode,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
}
func createVolumeModeNilTestVolume() *v1.PersistentVolume {
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
UID: "local-1",
Name: "nil-mode",
},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G"),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
}
@ -843,28 +1067,7 @@ func createTestVolOrderedIndex(pv *v1.PersistentVolume) persistentVolumeOrderedI
return volFile
}
func toggleFeature(toggleFlag bool, featureName string, t *testing.T) {
var valueStr string
if toggleFlag {
// Enable feature
valueStr = featureName + "=true"
err := utilfeature.DefaultFeatureGate.Set(valueStr)
if err != nil {
t.Errorf("Failed to enable feature gate for %s: %v", featureName, err)
return
}
} else {
// Disable feature
valueStr = featureName + "=false"
err := utilfeature.DefaultFeatureGate.Set(valueStr)
if err != nil {
t.Errorf("Failed to disable feature gate for %s: %v", featureName, err)
return
}
}
}
func TestAlphaVolumeModeCheck(t *testing.T) {
func TestVolumeModeCheck(t *testing.T) {
blockMode := v1.PersistentVolumeBlock
filesystemMode := v1.PersistentVolumeFilesystem
@ -872,55 +1075,85 @@ func TestAlphaVolumeModeCheck(t *testing.T) {
// If feature gate is enabled, VolumeMode will always be defaulted
// If feature gate is disabled, VolumeMode is dropped by API and ignored
scenarios := map[string]struct {
isExpectedMisMatch bool
isExpectedMismatch bool
vol *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
enableBlock bool
}{
"feature enabled - pvc block and pv filesystem": {
isExpectedMisMatch: true,
isExpectedMismatch: true,
vol: createVolumeModeFilesystemTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: true,
},
"feature enabled - pvc filesystem and pv block": {
isExpectedMisMatch: true,
isExpectedMismatch: true,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
enableBlock: true,
},
"feature enabled - pvc block and pv block": {
isExpectedMisMatch: false,
isExpectedMismatch: false,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: true,
},
"feature enabled - pvc filesystem and pv filesystem": {
isExpectedMisMatch: false,
isExpectedMismatch: false,
vol: createVolumeModeFilesystemTestVolume(),
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
enableBlock: true,
},
"feature enabled - pvc filesystem and pv nil": {
isExpectedMismatch: false,
vol: createVolumeModeNilTestVolume(),
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
enableBlock: true,
},
"feature enabled - pvc nil and pv filesytem": {
isExpectedMismatch: false,
vol: createVolumeModeFilesystemTestVolume(),
pvc: makeVolumeModePVC("8G", nil, nil),
enableBlock: true,
},
"feature enabled - pvc nil and pv nil": {
isExpectedMismatch: false,
vol: createVolumeModeNilTestVolume(),
pvc: makeVolumeModePVC("8G", nil, nil),
enableBlock: true,
},
"feature enabled - pvc nil and pv block": {
isExpectedMismatch: true,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", nil, nil),
enableBlock: true,
},
"feature enabled - pvc block and pv nil": {
isExpectedMismatch: true,
vol: createVolumeModeNilTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: true,
},
"feature disabled - pvc block and pv filesystem": {
isExpectedMisMatch: false,
isExpectedMismatch: false,
vol: createVolumeModeFilesystemTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: false,
},
"feature disabled - pvc filesystem and pv block": {
isExpectedMisMatch: false,
isExpectedMismatch: false,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
enableBlock: false,
},
"feature disabled - pvc block and pv block": {
isExpectedMisMatch: false,
isExpectedMismatch: false,
vol: createVolumeModeBlockTestVolume(),
pvc: makeVolumeModePVC("8G", &blockMode, nil),
enableBlock: false,
},
"feature disabled - pvc filesystem and pv filesystem": {
isExpectedMisMatch: false,
isExpectedMismatch: false,
vol: createVolumeModeFilesystemTestVolume(),
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
enableBlock: false,
@ -928,25 +1161,23 @@ func TestAlphaVolumeModeCheck(t *testing.T) {
}
for name, scenario := range scenarios {
toggleFeature(scenario.enableBlock, "BlockVolume", t)
expectedMisMatch, err := checkVolumeModeMisMatches(&scenario.pvc.Spec, &scenario.vol.Spec)
recover := utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)
expectedMismatch, err := checkVolumeModeMismatches(&scenario.pvc.Spec, &scenario.vol.Spec)
if err != nil {
t.Errorf("Unexpected failure for checkVolumeModeMisMatches: %v", err)
t.Errorf("Unexpected failure for checkVolumeModeMismatches: %v", err)
}
// fail if a mismatch was reported when none was expected, or an expected mismatch was missed
if expectedMisMatch && !scenario.isExpectedMisMatch {
if expectedMismatch && !scenario.isExpectedMismatch {
t.Errorf("Unexpected failure for scenario, expected not to mismatch on modes but did: %s", name)
}
if !expectedMisMatch && scenario.isExpectedMisMatch {
if !expectedMismatch && scenario.isExpectedMismatch {
t.Errorf("Unexpected failure for scenario, did not mismatch on mode when expected to mismatch: %s", name)
}
recover()
}
// make sure feature gate is turned off
toggleFeature(false, "BlockVolume", t)
}
func TestAlphaFilteringVolumeModes(t *testing.T) {
func TestFilteringVolumeModes(t *testing.T) {
blockMode := v1.PersistentVolumeBlock
filesystemMode := v1.PersistentVolumeFilesystem
@ -1021,7 +1252,7 @@ func TestAlphaFilteringVolumeModes(t *testing.T) {
}
for name, scenario := range scenarios {
toggleFeature(scenario.enableBlock, "BlockVolume", t)
recover := utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, scenario.enableBlock)
pvmatch, err := scenario.vol.findBestMatchForClaim(scenario.pvc, false)
// expected to match but either got an error or no returned pvmatch
if pvmatch == nil && scenario.isExpectedMatch {
@ -1037,13 +1268,12 @@ func TestAlphaFilteringVolumeModes(t *testing.T) {
if err != nil && !scenario.isExpectedMatch {
t.Errorf("Unexpected failure for scenario: %s - %+v", name, err)
}
recover()
}
// make sure feature gate is turned off
toggleFeature(false, "BlockVolume", t)
}
func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
func TestStorageObjectInUseProtectionFiltering(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv1",
@ -1053,6 +1283,10 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")},
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
@ -1068,6 +1302,7 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
VolumeMode: &fs,
},
}
@ -1078,43 +1313,45 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
enableStorageObjectInUseProtection bool
}{
"feature enabled - pv deletionTimeStamp not set": {
isExpectedMatch: true,
vol: pv,
pvc: pvc,
isExpectedMatch: true,
vol: pv,
pvc: pvc,
enableStorageObjectInUseProtection: true,
},
"feature enabled - pv deletionTimeStamp set": {
isExpectedMatch: false,
vol: pvToDelete,
pvc: pvc,
isExpectedMatch: false,
vol: pvToDelete,
pvc: pvc,
enableStorageObjectInUseProtection: true,
},
"feature disabled - pv deletionTimeStamp not set": {
isExpectedMatch: true,
vol: pv,
pvc: pvc,
isExpectedMatch: true,
vol: pv,
pvc: pvc,
enableStorageObjectInUseProtection: false,
},
"feature disabled - pv deletionTimeStamp set": {
isExpectedMatch: true,
vol: pvToDelete,
pvc: pvc,
isExpectedMatch: true,
vol: pvToDelete,
pvc: pvc,
enableStorageObjectInUseProtection: false,
},
}
for name, testCase := range satisfyingTestCases {
toggleFeature(testCase.enableStorageObjectInUseProtection, "StorageObjectInUseProtection", t)
err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
// expected to match but got an error
if err != nil && testCase.isExpectedMatch {
t.Errorf("%s: expected to match but got an error: %v", name, err)
}
// not expected to match but did
if err == nil && !testCase.isExpectedMatch {
t.Errorf("%s: not expected to match but did", name)
}
t.Run(name, func(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
// expected to match but got an error
if err != nil && testCase.isExpectedMatch {
t.Errorf("%s: expected to match but got an error: %v", name, err)
}
// not expected to match but did
if err == nil && !testCase.isExpectedMatch {
t.Errorf("%s: not expected to match but did", name)
}
})
}
filteringTestCases := map[string]struct {
@ -1124,54 +1361,55 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
enableStorageObjectInUseProtection bool
}{
"feature enabled - pv deletionTimeStamp not set": {
isExpectedMatch: true,
vol: createTestVolOrderedIndex(pv),
pvc: pvc,
isExpectedMatch: true,
vol: createTestVolOrderedIndex(pv),
pvc: pvc,
enableStorageObjectInUseProtection: true,
},
"feature enabled - pv deletionTimeStamp set": {
isExpectedMatch: false,
vol: createTestVolOrderedIndex(pvToDelete),
pvc: pvc,
isExpectedMatch: false,
vol: createTestVolOrderedIndex(pvToDelete),
pvc: pvc,
enableStorageObjectInUseProtection: true,
},
"feature disabled - pv deletionTimeStamp not set": {
isExpectedMatch: true,
vol: createTestVolOrderedIndex(pv),
pvc: pvc,
isExpectedMatch: true,
vol: createTestVolOrderedIndex(pv),
pvc: pvc,
enableStorageObjectInUseProtection: false,
},
"feature disabled - pv deletionTimeStamp set": {
isExpectedMatch: true,
vol: createTestVolOrderedIndex(pvToDelete),
pvc: pvc,
isExpectedMatch: true,
vol: createTestVolOrderedIndex(pvToDelete),
pvc: pvc,
enableStorageObjectInUseProtection: false,
},
}
for name, testCase := range filteringTestCases {
toggleFeature(testCase.enableStorageObjectInUseProtection, "StorageObjectInUseProtection", t)
pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
// expected to match but either got an error or no returned pvmatch
if pvmatch == nil && testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase, no matching volume: %s", name)
}
if err != nil && testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
}
// expected to not match but either got an error or a returned pvmatch
if pvmatch != nil && !testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase, expected no matching volume: %s", name)
}
if err != nil && !testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
}
}
t.Run(name, func(t *testing.T) {
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageObjectInUseProtection, testCase.enableStorageObjectInUseProtection)()
// make sure feature gate is turned off
toggleFeature(false, "StorageObjectInUseProtection", t)
pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
// expected to match but either got an error or no returned pvmatch
if pvmatch == nil && testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase, no matching volume: %s", name)
}
if err != nil && testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
}
// expected to not match but either got an error or a returned pvmatch
if pvmatch != nil && !testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase, expected no matching volume: %s", name)
}
if err != nil && !testCase.isExpectedMatch {
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
}
})
}
}
func TestFindingPreboundVolumes(t *testing.T) {
fs := v1.PersistentVolumeFilesystem
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claim01",
@ -1181,6 +1419,7 @@ func TestFindingPreboundVolumes(t *testing.T) {
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi")}},
VolumeMode: &fs,
},
}
claimRef, err := ref.GetReference(scheme.Scheme, claim)
@ -1274,6 +1513,11 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
Labels: map[string]string{"key1": "value3"},
},
}
node4 := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"key1": "value4"},
},
}
scenarios := map[string]struct {
expectedMatch string
@ -1341,6 +1585,15 @@ func TestFindMatchVolumeWithNode(t *testing.T) {
}),
node: node3,
},
"fail-nonavaiable": {
expectedMatch: "",
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {
pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
pvc.Spec.StorageClassName = &classWait
pvc.Name = "claim04"
}),
node: node4,
},
"success-bad-and-good-node-affinity": {
expectedMatch: "affinity-pv3",
claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) {

View File

@ -0,0 +1,29 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"testing"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
_ "k8s.io/kubernetes/pkg/features"
)
func TestMain(m *testing.M) {
utilfeaturetesting.VerifyFeatureGatesUnchanged(utilfeature.DefaultFeatureGate, m.Run)
}
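VerifyFeatureGatesUnchanged wraps m.Run so that a test which flips a gate and forgets to restore it fails the whole package instead of silently leaking state into later tests. A conceptual sketch of the assumed behavior, not the helper's actual implementation (the snapshot func is hypothetical):

import (
	"fmt"
	"os"
	"testing"
)

// runVerified snapshots gate state, runs the suite, and flags any leak.
func runVerified(m *testing.M, snapshot func() string) int {
	before := snapshot()
	code := m.Run()
	if snapshot() != before {
		fmt.Fprintln(os.Stderr, "feature gate state was changed by a test and not restored")
		if code == 0 {
			code = 1
		}
	}
	return code
}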

View File

@ -1,18 +1,14 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -27,4 +23,5 @@ filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -21,8 +21,8 @@ import (
"k8s.io/api/core/v1"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog"
)
const (
@ -56,6 +56,8 @@ type PVCLister interface {
func Register(pvLister PVLister, pvcLister PVCLister) {
registerMetrics.Do(func() {
prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
prometheus.MustRegister(volumeOperationMetric)
prometheus.MustRegister(volumeOperationErrorsMetric)
})
}
@ -89,6 +91,19 @@ var (
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
"Gauge measuring number of persistent volume claim currently unbound",
[]string{namespaceLabel}, nil)
volumeOperationMetric = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "volume_operation_total_seconds",
Help: "Total volume operation time",
},
[]string{"plugin_name", "operation_name"})
volumeOperationErrorsMetric = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "volume_operation_total_errors",
Help: "Total volume operation erros",
},
[]string{"plugin_name", "operation_name"})
)
func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {
@ -124,7 +139,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric)
float64(number),
storageClassName)
if err != nil {
glog.Warningf("Create bound pv number metric failed: %v", err)
klog.Warningf("Create bound pv number metric failed: %v", err)
continue
}
ch <- metric
@ -136,7 +151,7 @@ func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric)
float64(number),
storageClassName)
if err != nil {
glog.Warningf("Create unbound pv number metric failed: %v", err)
klog.Warningf("Create unbound pv number metric failed: %v", err)
continue
}
ch <- metric
@ -164,7 +179,7 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
float64(number),
namespace)
if err != nil {
glog.Warningf("Create bound pvc number metric failed: %v", err)
klog.Warningf("Create bound pvc number metric failed: %v", err)
continue
}
ch <- metric
@ -176,9 +191,21 @@ func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric)
float64(number),
namespace)
if err != nil {
glog.Warningf("Create unbound pvc number metric failed: %v", err)
klog.Warningf("Create unbound pvc number metric failed: %v", err)
continue
}
ch <- metric
}
}
// RecordVolumeOperationMetric records the latency and errors of volume operations.
func RecordVolumeOperationMetric(pluginName, opName string, timeTaken float64, err error) {
if pluginName == "" {
pluginName = "N/A"
}
if err != nil {
volumeOperationErrorsMetric.WithLabelValues(pluginName, opName).Inc()
return
}
volumeOperationMetric.WithLabelValues(pluginName, opName).Observe(timeTaken)
}
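A hypothetical call site for the new helper, timing a delete operation; the plugin and operation names below are illustrative, not values used by this package:

start := time.Now()
err := deleter.Delete() // deleter is an already-constructed vol.Deleter
metrics.RecordVolumeOperationMetric("example.com/fake-plugin", "delete", time.Since(start).Seconds(), err)
// On error only the error counter is incremented; otherwise the
// latency histogram observes the elapsed seconds.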

View File

@ -34,6 +34,7 @@ var class2Parameters = map[string]string{
"param2": "value2",
}
var deleteReclaimPolicy = v1.PersistentVolumeReclaimDelete
var modeImmediate = storage.VolumeBindingImmediate
var storageClasses = []*storage.StorageClass{
{
TypeMeta: metav1.TypeMeta{
@ -44,9 +45,10 @@ var storageClasses = []*storage.StorageClass{
Name: "gold",
},
Provisioner: mockPluginName,
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
Provisioner: mockPluginName,
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
VolumeBindingMode: &modeImmediate,
},
{
TypeMeta: metav1.TypeMeta{
@ -55,9 +57,10 @@ var storageClasses = []*storage.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "silver",
},
Provisioner: mockPluginName,
Parameters: class2Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
Provisioner: mockPluginName,
Parameters: class2Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
VolumeBindingMode: &modeImmediate,
},
{
TypeMeta: metav1.TypeMeta{
@ -66,9 +69,10 @@ var storageClasses = []*storage.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "external",
},
Provisioner: "vendor.com/my-volume",
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
Provisioner: "vendor.com/my-volume",
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
VolumeBindingMode: &modeImmediate,
},
{
TypeMeta: metav1.TypeMeta{
@ -77,9 +81,10 @@ var storageClasses = []*storage.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "unknown-internal",
},
Provisioner: "kubernetes.io/unknown",
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
Provisioner: "kubernetes.io/unknown",
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
VolumeBindingMode: &modeImmediate,
},
{
TypeMeta: metav1.TypeMeta{
@ -88,10 +93,11 @@ var storageClasses = []*storage.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "unsupported-mountoptions",
},
Provisioner: mockPluginName,
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
MountOptions: []string{"foo"},
Provisioner: mockPluginName,
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
MountOptions: []string{"foo"},
VolumeBindingMode: &modeImmediate,
},
}
@ -166,7 +172,7 @@ func TestProvisionSync(t *testing.T) {
{
// No provisioning if there is a matching volume available
"11-6 - provisioning when there is a volume available",
newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
newClaimArray("claim11-6", "uid11-6", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted),

File diff suppressed because it is too large

View File

@ -38,13 +38,13 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
"k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume"
"github.com/golang/glog"
"k8s.io/klog"
)
// This file contains the controller base functionality, i.e. framework to
@ -73,18 +73,18 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
eventRecorder := p.EventRecorder
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartLogging(klog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")})
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
}
controller := &PersistentVolumeController{
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: p.KubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
cloud: p.Cloud,
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: p.KubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
cloud: p.Cloud,
enableDynamicProvisioning: p.EnableDynamicProvisioning,
clusterName: p.ClusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
@ -134,27 +134,27 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
volumeList, err := volumeLister.List(labels.Everything())
if err != nil {
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
return
}
for _, volume := range volumeList {
volumeClone := volume.DeepCopy()
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
glog.Errorf("error updating volume cache: %v", err)
klog.Errorf("error updating volume cache: %v", err)
}
}
claimList, err := claimLister.List(labels.Everything())
if err != nil {
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
return
}
for _, claim := range claimList {
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
glog.Errorf("error updating claim cache: %v", err)
klog.Errorf("error updating claim cache: %v", err)
}
}
glog.V(4).Infof("controller initialized")
klog.V(4).Infof("controller initialized")
}
// enqueueWork adds volume or claim to given work queue.
@ -165,10 +165,10 @@ func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, o
}
objName, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("failed to get key from object: %v", err)
klog.Errorf("failed to get key from object: %v", err)
return
}
glog.V(5).Infof("enqueued %q for sync", objName)
klog.V(5).Infof("enqueued %q for sync", objName)
queue.Add(objName)
}
@ -187,7 +187,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
// is an old version.
new, err := ctrl.storeVolumeUpdate(volume)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
if !new {
return
@ -198,9 +198,9 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
} else {
glog.Errorf("could not sync volume %q: %+v", volume.Name, err)
klog.Errorf("could not sync volume %q: %+v", volume.Name, err)
}
}
}
@ -208,7 +208,7 @@ func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume
// deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) {
_ = ctrl.volumes.store.Delete(volume)
glog.V(4).Infof("volume %q deleted", volume.Name)
klog.V(4).Infof("volume %q deleted", volume.Name)
if volume.Spec.ClaimRef == nil {
return
@ -217,7 +217,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
// claim here in response to volume deletion prevents the claim from
// waiting until the next sync period for its Lost status.
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
glog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
ctrl.claimQueue.Add(claimKey)
}
@ -228,7 +228,7 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
// an old version.
new, err := ctrl.storeClaimUpdate(claim)
if err != nil {
glog.Errorf("%v", err)
klog.Errorf("%v", err)
}
if !new {
return
@ -238,9 +238,9 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
} else {
glog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
}
}
}
@ -248,17 +248,17 @@ func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeCl
// deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) {
_ = ctrl.claims.Delete(claim)
glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
klog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
volumeName := claim.Spec.VolumeName
if volumeName == "" {
glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
return
}
// sync the volume when its claim is deleted. Explicitly sync'ing the
// volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release.
glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
ctrl.volumeQueue.Add(volumeName)
}
@ -268,8 +268,8 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
defer ctrl.claimQueue.ShutDown()
defer ctrl.volumeQueue.ShutDown()
glog.Infof("Starting persistent volume controller")
defer glog.Infof("Shutting down persistent volume controller")
klog.Infof("Starting persistent volume controller")
defer klog.Infof("Shutting down persistent volume controller")
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
return
@ -296,11 +296,11 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
}
defer ctrl.volumeQueue.Done(keyObj)
key := keyObj.(string)
glog.V(5).Infof("volumeWorker[%s]", key)
klog.V(5).Infof("volumeWorker[%s]", key)
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
return false
}
volume, err := ctrl.volumeLister.Get(name)
@ -311,7 +311,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
return false
}
if !errors.IsNotFound(err) {
glog.V(2).Infof("error getting volume %q from informer: %v", key, err)
klog.V(2).Infof("error getting volume %q from informer: %v", key, err)
return false
}
@ -319,18 +319,18 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
// "delete"
volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
if err != nil {
glog.V(2).Infof("error getting volume %q from cache: %v", key, err)
klog.V(2).Infof("error getting volume %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the volume from its cache
glog.V(2).Infof("deletion of volume %q was already processed", key)
klog.V(2).Infof("deletion of volume %q was already processed", key)
return false
}
volume, ok := volumeObj.(*v1.PersistentVolume)
if !ok {
glog.Errorf("expected volume, got %+v", volumeObj)
klog.Errorf("expected volume, got %+v", volumeObj)
return false
}
ctrl.deleteVolume(volume)
@ -338,7 +338,7 @@ func (ctrl *PersistentVolumeController) volumeWorker() {
}
for {
if quit := workFunc(); quit {
glog.Infof("volume worker queue shutting down")
klog.Infof("volume worker queue shutting down")
return
}
}
@ -354,11 +354,11 @@ func (ctrl *PersistentVolumeController) claimWorker() {
}
defer ctrl.claimQueue.Done(keyObj)
key := keyObj.(string)
glog.V(5).Infof("claimWorker[%s]", key)
klog.V(5).Infof("claimWorker[%s]", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
return false
}
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
@ -369,25 +369,25 @@ func (ctrl *PersistentVolumeController) claimWorker() {
return false
}
if !errors.IsNotFound(err) {
glog.V(2).Infof("error getting claim %q from informer: %v", key, err)
klog.V(2).Infof("error getting claim %q from informer: %v", key, err)
return false
}
// The claim is not in informer cache, the event must have been "delete"
claimObj, found, err := ctrl.claims.GetByKey(key)
if err != nil {
glog.V(2).Infof("error getting claim %q from cache: %v", key, err)
klog.V(2).Infof("error getting claim %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the claim from its cache
glog.V(2).Infof("deletion of claim %q was already processed", key)
klog.V(2).Infof("deletion of claim %q was already processed", key)
return false
}
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
if !ok {
glog.Errorf("expected claim, got %+v", claimObj)
klog.Errorf("expected claim, got %+v", claimObj)
return false
}
ctrl.deleteClaim(claim)
@ -395,7 +395,7 @@ func (ctrl *PersistentVolumeController) claimWorker() {
}
for {
if quit := workFunc(); quit {
glog.Infof("claim worker queue shutting down")
klog.Infof("claim worker queue shutting down")
return
}
}
@ -405,11 +405,11 @@ func (ctrl *PersistentVolumeController) claimWorker() {
// all consumers of PV/PVC shared informer to have a short resync period,
// therefore we do our own.
func (ctrl *PersistentVolumeController) resync() {
glog.V(4).Infof("resyncing PV controller")
klog.V(4).Infof("resyncing PV controller")
pvcs, err := ctrl.claimLister.List(labels.NewSelector())
if err != nil {
glog.Warningf("cannot list claims: %s", err)
klog.Warningf("cannot list claims: %s", err)
return
}
for _, pvc := range pvcs {
@ -418,7 +418,7 @@ func (ctrl *PersistentVolumeController) resync() {
pvs, err := ctrl.volumeLister.List(labels.NewSelector())
if err != nil {
glog.Warningf("cannot list persistent volumes: %s", err)
klog.Warningf("cannot list persistent volumes: %s", err)
return
}
for _, pv := range pvs {
@ -504,7 +504,7 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
if !found {
// This is a new object
glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
if err = store.Add(obj); err != nil {
return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
}
@ -528,11 +528,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
// Throw away only older version, let the same version pass - we do want to
// get periodic sync events.
if oldObjResourceVersion > objResourceVersion {
glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
return false, nil
}
glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
if err = store.Update(obj); err != nil {
return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
}

View File

@ -20,18 +20,26 @@ import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/features"
)
var (
classNotHere = "not-here"
classNoMode = "no-mode"
classImmediateMode = "immediate-mode"
classWaitMode = "wait-mode"
)
// Test the real controller methods (add/update/delete claim/volume) with
@ -96,7 +104,7 @@ func TestControllerSync(t *testing.T) {
}
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
klog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
@ -140,7 +148,7 @@ func TestControllerSync(t *testing.T) {
time.Sleep(10 * time.Millisecond)
}
glog.V(4).Infof("controller synced, starting test")
klog.V(4).Infof("controller synced, starting test")
// Call the tested function
err = test.test(ctrl, reactor, test)
@ -237,12 +245,21 @@ func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string)
return volume
}
func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
func makePVCClass(scName *string, hasSelectNodeAnno bool) *v1.PersistentVolumeClaim {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{},
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: scName,
},
}
if hasSelectNodeAnno {
claim.Annotations[annSelectedNode] = "node-name"
}
return claim
}
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
@ -255,42 +272,36 @@ func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storage
}
func TestDelayBinding(t *testing.T) {
var (
classNotHere = "not-here"
classNoMode = "no-mode"
classImmediateMode = "immediate-mode"
classWaitMode = "wait-mode"
modeImmediate = storagev1.VolumeBindingImmediate
modeWait = storagev1.VolumeBindingWaitForFirstConsumer
)
tests := map[string]struct {
pvc *v1.PersistentVolumeClaim
shouldDelay bool
shouldFail bool
}{
"nil-class": {
pvc: makePVCClass(nil),
pvc: makePVCClass(nil, false),
shouldDelay: false,
},
"class-not-found": {
pvc: makePVCClass(&classNotHere),
pvc: makePVCClass(&classNotHere, false),
shouldDelay: false,
},
"no-mode-class": {
pvc: makePVCClass(&classNoMode),
pvc: makePVCClass(&classNoMode, false),
shouldDelay: false,
shouldFail: true,
},
"immediate-mode-class": {
pvc: makePVCClass(&classImmediateMode),
pvc: makePVCClass(&classImmediateMode, false),
shouldDelay: false,
},
"wait-mode-class": {
pvc: makePVCClass(&classWaitMode),
pvc: makePVCClass(&classWaitMode, false),
shouldDelay: true,
},
"wait-mode-class-with-selectedNode": {
pvc: makePVCClass(&classWaitMode, true),
shouldDelay: false,
},
}
classes := []*storagev1.StorageClass{
@ -312,22 +323,8 @@ func TestDelayBinding(t *testing.T) {
}
}
// When volumeScheduling feature gate is disabled, should always be delayed
name := "volumeScheduling-feature-disabled"
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if shouldDelay {
t.Errorf("Test %q returned true, expected false", name)
}
// Enable volumeScheduling feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
for name, test := range tests {
shouldDelay, err = ctrl.shouldDelayBinding(test.pvc)
shouldDelay, err := ctrl.shouldDelayBinding(test.pvc)
if err != nil && !test.shouldFail {
t.Errorf("Test %q returned error: %v", name, err)
}
@ -338,39 +335,21 @@ func TestDelayBinding(t *testing.T) {
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
}
}
}
// When dynamicProvisioningScheduling feature gate is disabled, should be delayed,
// even if the pvc has selectedNode annotation.
provisionedClaim := makePVCClass(&classWaitMode)
provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"}
name = "dynamicProvisioningScheduling-feature-disabled"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
func TestDelayBindingDisabled(t *testing.T) {
// When volumeScheduling feature gate is disabled, should always be immediate
defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeScheduling, false)()
client := &fake.Clientset{}
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
classInformer := informerFactory.Storage().V1().StorageClasses()
ctrl := &PersistentVolumeController{
classLister: classInformer.Lister(),
}
// Enable DynamicProvisioningScheduling feature gate
utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false")
// When the pvc does not have selectedNode annotation, should be delayed,
// even if dynamicProvisioningScheduling feature gate is enabled.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set"
shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}
// Should not be delayed when dynamicProvisioningScheduling feature gate is enabled,
// and the pvc has selectedNode annotation.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
name := "volumeScheduling-feature-disabled"
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode, false))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}

View File

@ -229,6 +229,16 @@ func TestRecycleSync(t *testing.T) {
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// volume is used by a completed pod; a running pod uses a claim with the same name bound to a different pv; should recycle
"6-14 - seemingly used by running pod",
newVolumeArray("volume6-14", "1Gi", "uid6-14", "completedClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
newVolumeArray("volume6-14", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
newClaimArray("completedClaim", "uid6-14-x", "10Gi", "", v1.ClaimBound, nil),
newClaimArray("completedClaim", "uid6-14-x", "10Gi", "", v1.ClaimBound, nil),
noevents, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
}
runSyncTests(t, tests, []*storage.StorageClass{}, pods)
}

View File

@ -21,7 +21,7 @@ import (
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
@ -83,7 +83,8 @@ func (e *errObjectName) Error() string {
// Restore() sets the latest object pointer back to the informer object.
// Get/List() always returns the latest object pointer.
type assumeCache struct {
mutex sync.Mutex
// Synchronizes updates to store
rwMutex sync.RWMutex
// describes the object stored
description string
@ -151,37 +152,37 @@ func (c *assumeCache) add(obj interface{}) {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
glog.Errorf("add failed: %v", &errObjectName{err})
klog.Errorf("add failed: %v", &errObjectName{err})
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
glog.Errorf("add: couldn't get object version: %v", err)
klog.Errorf("add: couldn't get object version: %v", err)
return
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
glog.Errorf("add: couldn't get stored object version: %v", err)
klog.Errorf("add: couldn't get stored object version: %v", err)
return
}
// Only update object if version is newer.
// This is so we don't override assumed objects due to informer resync.
if newVersion <= storedVersion {
glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
klog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
return
}
}
objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
c.store.Update(objInfo)
glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
klog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
}
func (c *assumeCache) update(oldObj interface{}, newObj interface{}) {
@ -195,17 +196,17 @@ func (c *assumeCache) delete(obj interface{}) {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
glog.Errorf("delete failed: %v", &errObjectName{err})
klog.Errorf("delete failed: %v", &errObjectName{err})
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo := &objInfo{name: name}
err = c.store.Delete(objInfo)
if err != nil {
glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
klog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
}
}
@ -239,8 +240,8 @@ func (c *assumeCache) getObjInfo(name string) (*objInfo, error) {
}
func (c *assumeCache) Get(objName string) (interface{}, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
@ -250,20 +251,20 @@ func (c *assumeCache) Get(objName string) (interface{}, error) {
}
func (c *assumeCache) List(indexObj interface{}) []interface{} {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
allObjs := []interface{}{}
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
glog.Errorf("list index error: %v", err)
klog.Errorf("list index error: %v", err)
return nil
}
for _, obj := range objs {
objInfo, ok := obj.(*objInfo)
if !ok {
glog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
klog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
continue
}
allObjs = append(allObjs, objInfo.latestObj)
@ -277,8 +278,8 @@ func (c *assumeCache) Assume(obj interface{}) error {
return &errObjectName{err}
}
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(name)
if err != nil {
@ -301,21 +302,21 @@ func (c *assumeCache) Assume(obj interface{}) error {
// Only update the cached object
objInfo.latestObj = obj
glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
klog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
return nil
}
func (c *assumeCache) Restore(objName string) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
// This could be expected if object got deleted
glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
klog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
} else {
objInfo.latestObj = objInfo.apiObj
glog.V(4).Infof("Restored %v %q", c.description, objName)
klog.V(4).Infof("Restored %v %q", c.description, objName)
}
}
@ -365,7 +366,7 @@ func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume
for _, obj := range objs {
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
klog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
}
pvs = append(pvs, pv)
}
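The add() path above only replaces a cached object when the incoming resource version is newer, so an informer resync delivering an older copy cannot clobber an assumed object, and reads now go through the read side of the new RWMutex. A toy analogue of that guard, assuming integer versions in place of resource versions:

package main

import (
	"fmt"
	"sync"
)

// versionedCache is a toy analogue of assumeCache: writes take the write
// lock, reads take the read lock, and stale versions are dropped.
type versionedCache struct {
	rwMutex  sync.RWMutex
	versions map[string]int
	objects  map[string]string
}

func (c *versionedCache) add(name string, version int, obj string) {
	c.rwMutex.Lock()
	defer c.rwMutex.Unlock()
	// Only update if the version is newer, so assumed objects are not
	// overridden by an informer resync delivering an older copy.
	if stored, ok := c.versions[name]; ok && version <= stored {
		return
	}
	c.versions[name] = version
	c.objects[name] = obj
}

func (c *versionedCache) get(name string) (string, bool) {
	c.rwMutex.RLock()
	defer c.rwMutex.RUnlock()
	obj, ok := c.objects[name]
	return obj, ok
}

func main() {
	c := &versionedCache{versions: map[string]int{}, objects: map[string]string{}}
	c.add("pv-1", 2, "assumed")
	c.add("pv-1", 1, "stale resync") // dropped: version 1 <= 2
	fmt.Println(c.get("pv-1"))       // assumed true
}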

View File

@ -456,7 +456,7 @@ func TestAssumeUpdatePVCCache(t *testing.T) {
// Assume PVC
newPVC := pvc.DeepCopy()
newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
newPVC.Annotations[annSelectedNode] = "test-node"
if err := cache.Assume(newPVC); err != nil {
t.Fatalf("failed to assume PVC: %v", err)
}

View File

@ -0,0 +1,60 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"github.com/prometheus/client_golang/prometheus"
)
// VolumeSchedulerSubsystem - subsystem name used by scheduler
const VolumeSchedulerSubsystem = "scheduler_volume"
var (
VolumeBindingRequestSchedulerBinderCache = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: VolumeSchedulerSubsystem,
Name: "binder_cache_requests_total",
Help: "Total number for request volume binding cache",
},
[]string{"operation"},
)
VolumeSchedulingStageLatency = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Subsystem: VolumeSchedulerSubsystem,
Name: "scheduling_duration_seconds",
Help: "Volume scheduling stage latency",
Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
},
[]string{"operation"},
)
VolumeSchedulingStageFailed = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: VolumeSchedulerSubsystem,
Name: "scheduling_stage_error_total",
Help: "Volume scheduling stage error count",
},
[]string{"operation"},
)
)
// RegisterVolumeSchedulingMetrics is used by the scheduler, because the volume binding cache is a library
// used by the scheduler process.
func RegisterVolumeSchedulingMetrics() {
prometheus.MustRegister(VolumeBindingRequestSchedulerBinderCache)
prometheus.MustRegister(VolumeSchedulingStageLatency)
prometheus.MustRegister(VolumeSchedulingStageFailed)
}
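These vectors are registered once by the scheduler process and then updated at call sites, typically with a deferred observe so the latency is recorded on every return path, as the binder functions below do. A minimal sketch of that pattern, using a stand-in histogram rather than the real vector:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// stageLatency is a stand-in for VolumeSchedulingStageLatency.
var stageLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Subsystem: "scheduler_volume",
		Name:      "scheduling_duration_seconds",
		Help:      "Volume scheduling stage latency (sketch)",
	},
	[]string{"operation"},
)

func main() {
	prometheus.MustRegister(stageLatency)

	// Call-site pattern: record the stage latency when the function returns.
	start := time.Now()
	defer func() {
		stageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
	}()
	// ... predicate work would run here ...
}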

View File

@ -19,18 +19,18 @@ package persistentvolume
import (
"fmt"
"sort"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@ -44,16 +44,19 @@ import (
// a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here.
// b. Invokes all priority functions. Future/TBD
// c. Selects the best node for the Pod.
// d. Cache the node selection for the Pod. (Assume phase)
// d. Cache the node selection for the Pod. AssumePodVolumes() is invoked here.
// i. If PVC binding is required, cache in-memory only:
// * Updated PV objects for prebinding to the corresponding PVCs.
// * For the pod, which PVs need API updates.
// AssumePodVolumes() is invoked here. Then BindPodVolumes() is called asynchronously by the
// scheduler. After BindPodVolumes() is complete, the Pod is added back to the scheduler queue
// to be processed again until all PVCs are bound.
// ii. If PVC binding is not required, cache the Pod->Node binding in the scheduler's pod cache,
// and asynchronously bind the Pod to the Node. This is handled in the scheduler and not here.
// 2. Once the assume operation is done, the scheduler processes the next Pod in the scheduler queue
// * For manual binding: update PV objects for prebinding to the corresponding PVCs.
// * For dynamic provisioning: update PVC object with a selected node from c)
// * For the pod, which PVCs and PVs need API updates.
// ii. Afterwards, the main scheduler caches the Pod->Node binding in the scheduler's pod cache.
// This is handled in the scheduler and not here.
// e. Asynchronously bind volumes and pod in a separate goroutine
// i. BindPodVolumes() is called first. It makes all the necessary API updates and waits for
// PV controller to fully bind and provision the PVCs. If binding fails, the Pod is sent
// back through the scheduler.
// ii. After BindPodVolumes() is complete, then the scheduler does the final Pod->Node binding.
// 2. Once all the assume operations are done in d), the scheduler processes the next Pod in the scheduler queue
// while the actual binding operation occurs in the background.
type SchedulerVolumeBinder interface {
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node.
@ -73,18 +76,18 @@ type SchedulerVolumeBinder interface {
// 2. Take the PVCs that need provisioning and update the PVC cache with related
// annotations set.
//
// It returns true if all volumes are fully bound, and returns true if any volume binding/provisioning
// API operation needs to be done afterwards.
// It returns true if all volumes are fully bound.
//
// This function will modify assumedPod with the node name.
// This function is called serially.
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error)
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error)
// BindPodVolumes will:
// 1. Initiate the volume binding by making the API call to prebind the PV
// to its matching PVC.
// 2. Trigger the volume provisioning by making the API call to set related
// annotations on the PVC
// 3. Wait for PVCs to be completely bound by the PV controller
//
// This function can be called in parallel.
BindPodVolumes(assumedPod *v1.Pod) error
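Taken together, the interface implies a strict call order: FindPodVolumes during predicate evaluation, AssumePodVolumes serially once a node is chosen, then BindPodVolumes asynchronously. A toy implementation sketching that order (string arguments stand in for the real *v1.Pod and *v1.Node; the return values are made up for illustration):

package main

import "fmt"

// toyBinder sketches the three-phase contract documented above; it is not
// the real volumeBinder, just an illustration of the call order.
type toyBinder struct{}

func (toyBinder) FindPodVolumes(pod, node string) (unboundOK, boundOK bool, err error) {
	return true, true, nil // pretend the node satisfies all PVCs
}

func (toyBinder) AssumePodVolumes(pod, node string) (allFullyBound bool, err error) {
	return false, nil // some volumes still need API updates
}

func (toyBinder) BindPodVolumes(pod string) error {
	return nil // API updates + waiting for the PV controller would happen here
}

func main() {
	b := toyBinder{}
	// 1. Predicate phase: can this node satisfy the pod's volumes?
	if u, bd, err := b.FindPodVolumes("pod-1", "node-1"); err != nil || !u || !bd {
		fmt.Println("node rejected")
		return
	}
	// 2. Assume phase (serial): cache the binding decisions in memory.
	allBound, _ := b.AssumePodVolumes("pod-1", "node-1")
	// 3. Bind phase (async in the scheduler): make API updates and wait.
	if !allBound {
		_ = b.BindPodVolumes("pod-1")
	}
	fmt.Println("volumes handled")
}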
@ -102,6 +105,9 @@ type volumeBinder struct {
// Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes.
// AssumePodVolumes modifies the bindings again for use in BindPodVolumes.
podBindingCache PodBindingCache
// Amount of time to wait for the bind operation to succeed
bindTimeout time.Duration
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
@ -109,7 +115,8 @@ func NewVolumeBinder(
kubeClient clientset.Interface,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
pvInformer coreinformers.PersistentVolumeInformer,
storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder {
storageClassInformer storageinformers.StorageClassInformer,
bindTimeout time.Duration) SchedulerVolumeBinder {
// TODO: find better way...
ctrl := &PersistentVolumeController{
@ -122,6 +129,7 @@ func NewVolumeBinder(
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
pvCache: NewPVAssumeCache(pvInformer.Informer()),
podBindingCache: NewPodBindingCache(),
bindTimeout: bindTimeout,
}
return b
@ -136,11 +144,18 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
podName := getPodName(pod)
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
// Initialize to true for pods that don't have volumes
unboundVolumesSatisfied = true
boundVolumesSatisfied = true
start := time.Now()
defer func() {
VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
if err != nil {
VolumeSchedulingStageFailed.WithLabelValues("predicate").Inc()
}
}()
// The pod's volumes need to be processed in one call to avoid the race condition where
// volumes can get bound/provisioned in between calls.
@ -151,7 +166,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
// Immediate claims should be bound
if len(unboundClaimsImmediate) > 0 {
return false, false, fmt.Errorf("pod has unbound PersistentVolumeClaims")
return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
}
// Check PV node affinity on bound volumes
@ -169,13 +184,11 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
return false, false, err
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
// Try to provision for unbound volumes
if !unboundVolumesSatisfied {
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return false, false, err
}
// Try to provision for unbound volumes
if !unboundVolumesSatisfied {
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return false, false, err
}
}
}
@ -187,25 +200,34 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
// in podBindingCache for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
// 2. Update the pvcCache with the new PVCs with annotations set
// It will update podBindingCache again with the PVs and PVCs that need an API update.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) {
// 3. Update podBindingCache again with cached API updates for PVs and PVCs.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, err error) {
podName := getPodName(assumedPod)
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
start := time.Now()
defer func() {
VolumeSchedulingStageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds())
if err != nil {
VolumeSchedulingStageFailed.WithLabelValues("assume").Inc()
}
}()
if allBound := b.arePodVolumesBound(assumedPod); allBound {
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
return true, false, nil
klog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
return true, nil
}
assumedPod.Spec.NodeName = nodeName
// Assume PV
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
newBindings := []*bindingInfo{}
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
// Assume PV
newBindings := []*bindingInfo{}
for _, binding := range claimsToBind {
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
klog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
podName,
binding.pv.Name,
binding.pvc.Name,
@ -214,29 +236,20 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
err)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, true, err
return false, err
}
// TODO: can we assume every time?
if dirty {
err = b.pvCache.Assume(newPV)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, true, err
return false, err
}
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
}
}
// Don't update cached bindings if no API updates are needed. This can happen if we
// previously updated the PV object and are waiting for the PV controller to finish binding.
if len(newBindings) != 0 {
bindingRequired = true
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
}
// Assume PVCs
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
for _, claim := range claimsToProvision {
// The claims from method args can be pointing to watcher cache. We must not
@ -253,50 +266,45 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
}
if len(newProvisionedPVCs) != 0 {
bindingRequired = true
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
}
// Update cache with the assumed pvcs and pvs
// Even if length is zero, update the cache with an empty slice to indicate that no
// operations are needed
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
return
}
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache
// and makes the API update for those PVs/PVCs.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache,
// makes the API update for those PVs/PVCs, and waits for the PVCs to be completely bound
// by the PV controller.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) (err error) {
podName := getPodName(assumedPod)
glog.V(4).Infof("BindPodVolumes for pod %q", podName)
klog.V(4).Infof("BindPodVolumes for pod %q, node %q", podName, assumedPod.Spec.NodeName)
start := time.Now()
defer func() {
VolumeSchedulingStageLatency.WithLabelValues("bind").Observe(time.Since(start).Seconds())
if err != nil {
VolumeSchedulingStageFailed.WithLabelValues("bind").Inc()
}
}()
bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for i, bindingInfo := range bindings {
glog.V(5).Infof("BindPodVolumes: Pod %q, binding PV %q to PVC %q", podName, bindingInfo.pv.Name, bindingInfo.pvc.Name)
_, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false)
if err != nil {
// only revert assumed cached updates for volumes we haven't successfully bound
b.revertAssumedPVs(bindings[i:])
// Revert all of the assumed cached updates for claims,
// since no actual API update will be done
b.revertAssumedPVCs(claimsToProvision)
return err
}
// Start API operations
err = b.bindAPIUpdate(podName, bindings, claimsToProvision)
if err != nil {
return err
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
for i, claim := range claimsToProvision {
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err)
// only revert assumed cached updates for claims we haven't successfully updated
b.revertAssumedPVCs(claimsToProvision[i:])
return err
}
}
return nil
return wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
// Get cached values every time in case the pod gets deleted
bindings = b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
claimsToProvision = b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
return b.checkBindings(assumedPod, bindings, claimsToProvision)
})
}
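The wait.Poll call above re-runs checkBindings once per interval until the condition returns true or an error, giving up after bindTimeout. The contract can be seen in isolation with a toy condition (the one-second interval and ten-second timeout here are arbitrary):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll once per second, give up after ten seconds: the same shape as
	// the bindTimeout loop above, with a toy condition instead of checkBindings.
	err := wait.Poll(time.Second, 10*time.Second, func() (bool, error) {
		attempts++
		// Returning (false, nil) retries; (true, nil) succeeds;
		// a non-nil error aborts immediately and is returned to the caller.
		return attempts >= 3, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}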
func getPodName(pod *v1.Pod) string {
@ -307,46 +315,162 @@ func getPVCName(pvc *v1.PersistentVolumeClaim) string {
return pvc.Namespace + "/" + pvc.Name
}
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFullyBound bool) (bool, *v1.PersistentVolumeClaim, error) {
// bindAPIUpdate gets the cached bindings and PVCs to provision in podBindingCache
// and makes the API update for those PVs/PVCs.
func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) error {
if bindings == nil {
return fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
lastProcessedBinding := 0
lastProcessedProvisioning := 0
defer func() {
// only revert assumed cached updates for volumes we haven't successfully bound
if lastProcessedBinding < len(bindings) {
b.revertAssumedPVs(bindings[lastProcessedBinding:])
}
// only revert assumed cached updates for claims we haven't successfully updated
if lastProcessedProvisioning < len(claimsToProvision) {
b.revertAssumedPVCs(claimsToProvision[lastProcessedProvisioning:])
}
}()
var (
binding *bindingInfo
claim *v1.PersistentVolumeClaim
)
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for _, binding = range bindings {
klog.V(5).Infof("bindAPIUpdate: Pod %q, binding PV %q to PVC %q", podName, binding.pv.Name, binding.pvc.Name)
// TODO: does it hurt if we make an API call and nothing needs to be updated?
if _, err := b.ctrl.updateBindVolumeToClaim(binding.pv, binding.pvc, false); err != nil {
return err
}
lastProcessedBinding++
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
// PV controller is expected to signal back by removing related annotations if actual provisioning fails
for _, claim = range claimsToProvision {
klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
return err
}
lastProcessedProvisioning++
}
return nil
}
// checkBindings runs through all the PVCs in the Pod and checks:
// * if the PVC is fully bound
// * if there are any conditions that require binding to fail and be retried
//
// It returns true when all of the Pod's PVCs are fully bound, and an error if
// binding (and scheduling) needs to be retried.
func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claimsToProvision []*v1.PersistentVolumeClaim) (bool, error) {
podName := getPodName(pod)
if bindings == nil {
return false, fmt.Errorf("failed to get cached bindings for pod %q", podName)
}
if claimsToProvision == nil {
return false, fmt.Errorf("failed to get cached claims to provision for pod %q", podName)
}
for _, binding := range bindings {
// Check for any conditions that might require scheduling retry
// Check if pv still exists
pv, err := b.pvCache.GetPV(binding.pv.Name)
if err != nil || pv == nil {
return false, fmt.Errorf("failed to check pv binding: %v", err)
}
// Check if pv.ClaimRef got dropped by unbindVolume()
if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID == "" {
return false, fmt.Errorf("ClaimRef got reset for pv %q", pv.Name)
}
// Check if pvc is fully bound
if isBound, _, err := b.isPVCBound(binding.pvc.Namespace, binding.pvc.Name); !isBound || err != nil {
return false, err
}
// TODO: what if pvc is bound to the wrong pv? It means our assume cache should be reverted.
// Or will pv controller cleanup the pv.ClaimRef?
}
for _, claim := range claimsToProvision {
bound, pvc, err := b.isPVCBound(claim.Namespace, claim.Name)
if err != nil || pvc == nil {
return false, fmt.Errorf("failed to check pvc binding: %v", err)
}
// Check if selectedNode annotation is still set
if pvc.Annotations == nil {
return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
}
selectedNode := pvc.Annotations[annSelectedNode]
if selectedNode != pod.Spec.NodeName {
return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName)
}
if !bound {
return false, nil
}
}
// All pvs and pvcs that we operated on are bound
klog.V(4).Infof("All PVCs for pod %q are bound", podName)
return true, nil
}
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) {
if vol.PersistentVolumeClaim == nil {
return true, nil, nil
}
pvcName := vol.PersistentVolumeClaim.ClaimName
return b.isPVCBound(namespace, pvcName)
}
func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: namespace,
},
}
pvc, err := b.pvcCache.GetPVC(getPVCName(claim))
pvcKey := getPVCName(claim)
pvc, err := b.pvcCache.GetPVC(pvcKey)
if err != nil || pvc == nil {
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err)
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcKey, err)
}
pvName := pvc.Spec.VolumeName
if pvName != "" {
if checkFullyBound {
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
} else {
glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
return false, pvc, nil
}
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
klog.V(5).Infof("PVC %q is fully bound to PV %q", pvcKey, pvName)
return true, pvc, nil
} else {
klog.V(5).Infof("PVC %q is not fully bound to PV %q", pvcKey, pvName)
return false, pvc, nil
}
glog.V(5).Infof("PVC %q is bound or prebound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
}
glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc))
klog.V(5).Infof("PVC %q is not bound", pvcKey)
return false, pvc, nil
}
// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
for _, vol := range pod.Spec.Volumes {
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol, true); !isBound {
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound {
// Pod has at least one PVC that needs binding
return false
}
@ -362,7 +486,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
unboundClaims = []*bindingInfo{}
for _, vol := range pod.Spec.Volumes {
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol, false)
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
if err != nil {
return nil, nil, nil, err
}
@ -376,7 +500,8 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
if err != nil {
return nil, nil, nil, err
}
if delayBinding {
// Prebound PVCs are treated as unbound with immediate binding
if delayBinding && pvc.Spec.VolumeName == "" {
// Scheduler path
unboundClaims = append(unboundClaims, &bindingInfo{pvc: pvc})
} else {
@ -398,13 +523,13 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node
err = volumeutil.CheckNodeAffinity(pv, node.Labels)
if err != nil {
glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, err.Error(), podName)
klog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, podName, err)
return false, nil
}
glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
klog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
}
glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
klog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
return true, nil
}
@ -428,6 +553,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
storageClassName = *storageClass
}
allPVs := b.pvCache.ListPVs(storageClassName)
pvcName := getPVCName(bindingInfo.pvc)
// Find a matching PV
bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true)
@ -435,7 +561,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
return false, nil, err
}
if bindingInfo.pv == nil {
glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, getPVCName(bindingInfo.pvc), node.Name)
klog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, pvcName, node.Name)
unboundClaims = append(unboundClaims, bindingInfo.pvc)
foundMatches = false
continue
@ -444,7 +570,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
// matching PV needs to be excluded so we don't select it again
chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv
matchedClaims = append(matchedClaims, bindingInfo)
glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, getPVCName(bindingInfo.pvc), node.Name, podName)
klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, pvcName, node.Name, podName)
}
// Mark cache with all the matches for each PVC for this node
@ -453,7 +579,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
}
if foundMatches {
glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
klog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
}
return
@ -467,9 +593,10 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
provisionedClaims := []*v1.PersistentVolumeClaim{}
for _, claim := range claimsToProvision {
pvcName := getPVCName(claim)
className := v1helper.GetPersistentVolumeClaimClass(claim)
if className == "" {
return false, fmt.Errorf("no class for claim %q", getPVCName(claim))
return false, fmt.Errorf("no class for claim %q", pvcName)
}
class, err := b.ctrl.classLister.Get(className)
@ -478,13 +605,13 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
}
provisioner := class.Provisioner
if provisioner == "" || provisioner == notSupportedProvisioner {
glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, getPVCName(claim))
klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
return false, nil
}
// Check if the node can satisfy the topology requirement in the class
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim))
klog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, pvcName)
return false, nil
}
@ -494,7 +621,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
provisionedClaims = append(provisionedClaims, claim)
}
glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
klog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
// Mark cache with all the PVCs that need provisioning for this node
b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims)
@ -522,19 +649,6 @@ type bindingInfo struct {
pv *v1.PersistentVolume
}
// Used in unit test errors
func (b bindingInfo) String() string {
pvcName := ""
pvName := ""
if b.pvc != nil {
pvcName = getPVCName(b.pvc)
}
if b.pv != nil {
pvName = b.pv.Name
}
return fmt.Sprintf("[PVC %q, PV %q]", pvcName, pvName)
}
type byPVCSize []*bindingInfo
func (a byPVCSize) Len() int {

View File

@ -31,6 +31,8 @@ type PodBindingCache interface {
UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo)
// GetBindings will return the cached bindings for the given pod and node.
// A nil return value means that the entry was not found. An empty slice
// means that no binding operations are needed.
GetBindings(pod *v1.Pod, node string) []*bindingInfo
// UpdateProvisionedPVCs will update the cache with the given provisioning decisions
@ -38,6 +40,8 @@ type PodBindingCache interface {
UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim)
// GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node.
// A nil return value means that the entry was not found. An empty slice
// means that no provisioning operations are needed.
GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim
// DeleteBindings will remove all cached bindings and provisionings for the given pod.
@ -46,7 +50,8 @@ type PodBindingCache interface {
}
type podBindingCache struct {
mutex sync.Mutex
// synchronizes bindingDecisions
rwMutex sync.RWMutex
// Key = pod name
// Value = nodeDecisions
@ -68,16 +73,20 @@ func NewPodBindingCache() PodBindingCache {
}
func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
podName := getPodName(pod)
delete(c.bindingDecisions, podName)
if _, ok := c.bindingDecisions[podName]; ok {
delete(c.bindingDecisions, podName)
VolumeBindingRequestSchedulerBinderCache.WithLabelValues("delete").Inc()
}
}
func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
@ -90,6 +99,7 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
decision = nodeDecision{
bindings: bindings,
}
VolumeBindingRequestSchedulerBinderCache.WithLabelValues("add").Inc()
} else {
decision.bindings = bindings
}
@ -97,8 +107,8 @@ func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*b
}
func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
@ -113,8 +123,8 @@ func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
}
func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
@ -134,8 +144,8 @@ func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs [
}
func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim {
c.mutex.Lock()
defer c.mutex.Unlock()
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]

View File

@ -16,18 +16,15 @@ limitations under the License.
package persistentvolume
import (
"k8s.io/api/core/v1"
)
import "k8s.io/api/core/v1"
type FakeVolumeBinderConfig struct {
AllBound bool
FindUnboundSatsified bool
FindBoundSatsified bool
FindErr error
AssumeBindingRequired bool
AssumeErr error
BindErr error
AllBound bool
FindUnboundSatsified bool
FindBoundSatsified bool
FindErr error
AssumeErr error
BindErr error
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make
@ -48,9 +45,9 @@ func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVo
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
}
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, bool, error) {
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, error) {
b.AssumeCalled = true
return b.config.AllBound, b.config.AssumeBindingRequired, b.config.AssumeErr
return b.config.AllBound, b.config.AssumeErr
}
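The signature change above drops the separate bindingRequired return value, leaving AssumePodVolumes with just (allBound, error). A self-contained sketch of a fake satisfying the narrowed contract; the Pod type is stubbed so the example compiles on its own, and the interface shape is inferred from this diff rather than quoted from upstream:

package main

import "fmt"

type Pod struct{ Name string }

// SchedulerVolumeBinder reflects the narrowed contract: callers learn only
// whether everything is already bound, plus any error.
type SchedulerVolumeBinder interface {
	AssumePodVolumes(assumedPod *Pod, nodeName string) (allBound bool, err error)
	BindPodVolumes(assumedPod *Pod) error
}

type fakeBinder struct {
	allBound  bool
	assumeErr error
}

func (b *fakeBinder) AssumePodVolumes(p *Pod, node string) (bool, error) {
	return b.allBound, b.assumeErr
}

func (b *fakeBinder) BindPodVolumes(p *Pod) error { return nil }

func main() {
	var binder SchedulerVolumeBinder = &fakeBinder{allBound: true}
	allBound, err := binder.AssumePodVolumes(&Pod{Name: "pod-a"}, "node1")
	fmt.Println(allBound, err)
}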
func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {

View File

@ -20,8 +20,9 @@ import (
"fmt"
"reflect"
"testing"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@ -29,7 +30,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
@ -38,20 +38,30 @@ import (
)
var (
unboundPVC = makeTestPVC("unbound-pvc", "1G", pvcUnbound, "", "1", &waitClass)
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", pvcUnbound, "", "1", &waitClass)
preboundPVC = makeTestPVC("prebound-pvc", "1G", pvcPrebound, "pv-node1a", "1", &waitClass)
boundPVC = makeTestPVC("bound-pvc", "1G", pvcBound, "pv-bound", "1", &waitClass)
boundPVC2 = makeTestPVC("bound-pvc2", "1G", pvcBound, "pv-bound2", "1", &waitClass)
badPVC = makeBadPVC()
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", pvcUnbound, "", "1", &immediateClass)
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", pvcBound, "pv-bound-immediate", "1", &immediateClass)
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", pvcUnbound, "", "1", &waitClass)
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "1", &waitClass)
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", pvcUnbound, "", "2", &waitClass)
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", pvcUnbound, "", "1", &provisionNotSupportClass)
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", pvcUnbound, "", "1", &topoMismatchClass)
// PVCs for manual binding
// TODO: clean up all of these
unboundPVC = makeTestPVC("unbound-pvc", "1G", "", pvcUnbound, "", "1", &waitClass)
unboundPVC2 = makeTestPVC("unbound-pvc2", "5G", "", pvcUnbound, "", "1", &waitClass)
preboundPVC = makeTestPVC("prebound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
preboundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcPrebound, "pv-node1a", "1", &waitClass)
boundPVC = makeTestPVC("bound-pvc", "1G", "", pvcBound, "pv-bound", "1", &waitClass)
boundPVC2 = makeTestPVC("bound-pvc2", "1G", "", pvcBound, "pv-bound2", "1", &waitClass)
boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass)
badPVC = makeBadPVC()
immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass)
immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass)
// PVCs for dynamic provisioning
provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
provisionedPVC2 = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner)
provisionedPVCHigherVersion = makeTestPVC("provisioned-pvc2", "1Gi", "", pvcUnbound, "", "2", &waitClassWithProvisioner)
provisionedPVCBound = makeTestPVC("provisioned-pvc", "1Gi", "", pvcBound, "some-pv", "1", &waitClassWithProvisioner)
noProvisionerPVC = makeTestPVC("no-provisioner-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClass)
topoMismatchPVC = makeTestPVC("topo-mismatch-pvc", "1Gi", "", pvcUnbound, "", "1", &topoMismatchClass)
selectedNodePVC = makeTestPVC("provisioned-pvc", "1Gi", nodeLabelValue, pvcSelectedNode, "", "1", &waitClassWithProvisioner)
// PVs for manual binding
pvNoNode = makeTestPV("pv-no-node", "", "1G", "1", nil, waitClass)
pvNode1a = makeTestPV("pv-node1a", "node1", "5G", "1", nil, waitClass)
pvNode1b = makeTestPV("pv-node1b", "node1", "10G", "1", nil, waitClass)
@ -59,12 +69,13 @@ var (
pvNode2 = makeTestPV("pv-node2", "node2", "1G", "1", nil, waitClass)
pvPrebound = makeTestPV("pv-prebound", "node1", "1G", "1", unboundPVC, waitClass)
pvBound = makeTestPV("pv-bound", "node1", "1G", "1", boundPVC, waitClass)
pvNode1aBound = makeTestPV("pv-node1a", "node1", "1G", "1", unboundPVC, waitClass)
pvNode1bBound = makeTestPV("pv-node1b", "node1", "5G", "1", unboundPVC2, waitClass)
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "5G", "2", unboundPVC2, waitClass)
pvNode1aBound = makeTestPV("pv-node1a", "node1", "5G", "1", unboundPVC, waitClass)
pvNode1bBound = makeTestPV("pv-node1b", "node1", "10G", "1", unboundPVC2, waitClass)
pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass)
pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass)
pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass)
// PVC/PV bindings for manual binding
binding1a = makeBinding(unboundPVC, pvNode1a)
binding1b = makeBinding(unboundPVC2, pvNode1b)
bindingNoNode = makeBinding(unboundPVC, pvNoNode)
@ -72,11 +83,13 @@ var (
binding1aBound = makeBinding(unboundPVC, pvNode1aBound)
binding1bBound = makeBinding(unboundPVC2, pvNode1bBound)
// storage class names
waitClass = "waitClass"
immediateClass = "immediateClass"
provisionNotSupportClass = "provisionNotSupportedClass"
waitClassWithProvisioner = "waitClassWithProvisioner"
topoMismatchClass = "topoMismatchClass"
// node topology
nodeLabelKey = "nodeKey"
nodeLabelValue = "node1"
)
@ -102,7 +115,8 @@ func newTestBinder(t *testing.T) *testEnv {
client,
pvcInformer,
informerFactory.Core().V1().PersistentVolumes(),
classInformer)
classInformer,
10*time.Second)
// Add storageclasses
waitMode := storagev1.VolumeBindingWaitForFirstConsumer
@ -110,7 +124,7 @@ func newTestBinder(t *testing.T) *testEnv {
classes := []*storagev1.StorageClass{
{
ObjectMeta: metav1.ObjectMeta{
Name: waitClass,
Name: waitClassWithProvisioner,
},
VolumeBindingMode: &waitMode,
Provisioner: "test-provisioner",
@ -133,7 +147,7 @@ func newTestBinder(t *testing.T) *testEnv {
},
{
ObjectMeta: metav1.ObjectMeta{
Name: provisionNotSupportClass,
Name: waitClass,
},
VolumeBindingMode: &waitMode,
Provisioner: "kubernetes.io/no-provisioner",
@ -247,17 +261,44 @@ func (env *testEnv) initPodCache(pod *v1.Pod, node string, bindings []*bindingIn
func (env *testEnv) validatePodCache(t *testing.T, name, node string, pod *v1.Pod, expectedBindings []*bindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) {
cache := env.internalBinder.podBindingCache
bindings := cache.GetBindings(pod, node)
if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen {
t.Errorf("Test %q failed. expected %v bindings, got %v", name, eLen, aLen)
} else if expectedBindings == nil && bindings != nil {
// nil and empty are different
t.Errorf("Test %q failed. expected nil bindings, got empty", name)
} else if expectedBindings != nil && bindings == nil {
// nil and empty are different
t.Errorf("Test %q failed. expected empty bindings, got nil", name)
} else {
for i := 0; i < aLen; i++ {
// Validate PV
if !reflect.DeepEqual(expectedBindings[i].pv, bindings[i].pv) {
t.Errorf("Test %q failed. binding.pv doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pv, bindings[i].pv))
}
if !reflect.DeepEqual(expectedBindings, bindings) {
t.Errorf("Test %q failed: Expected bindings %+v, got %+v", name, expectedBindings, bindings)
// Validate PVC
if !reflect.DeepEqual(expectedBindings[i].pvc, bindings[i].pvc) {
t.Errorf("Test %q failed. binding.pvc doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedBindings[i].pvc, bindings[i].pvc))
}
}
}
provisionedClaims := cache.GetProvisionedPVCs(pod, node)
if !reflect.DeepEqual(expectedProvisionings, provisionedClaims) {
t.Errorf("Test %q failed: Expected provisionings %+v, got %+v", name, expectedProvisionings, provisionedClaims)
if aLen, eLen := len(provisionedClaims), len(expectedProvisionings); aLen != eLen {
t.Errorf("Test %q failed. expected %v provisioned claims, got %v", name, eLen, aLen)
} else if expectedProvisionings == nil && provisionedClaims != nil {
// nil and empty are different
t.Errorf("Test %q failed. expected nil provisionings, got empty", name)
} else if expectedProvisionings != nil && provisionedClaims == nil {
// nil and empty are different
t.Errorf("Test %q failed. expected empty provisionings, got nil", name)
} else {
for i := 0; i < aLen; i++ {
if !reflect.DeepEqual(expectedProvisionings[i], provisionedClaims[i]) {
t.Errorf("Test %q failed. provisioned claims doesn't match [A-expected, B-got]: %s", name, diff.ObjectDiff(expectedProvisionings[i], provisionedClaims[i]))
}
}
}
}
func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod) []*bindingInfo {
@ -266,8 +307,6 @@ func (env *testEnv) getPodBindings(t *testing.T, name, node string, pod *v1.Pod)
}
func (env *testEnv) validateAssume(t *testing.T, name string, pod *v1.Pod, bindings []*bindingInfo, provisionings []*v1.PersistentVolumeClaim) {
// TODO: Check binding cache
// Check pv cache
pvCache := env.internalBinder.pvCache
for _, b := range bindings {
@ -383,17 +422,22 @@ const (
pvcUnbound = iota
pvcPrebound
pvcBound
pvcSelectedNode
)
func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
func makeTestPVC(name, size, node string, pvcBoundState int, pvName, resourceVersion string, className *string) *v1.PersistentVolumeClaim {
fs := v1.PersistentVolumeFilesystem
pvc := &v1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "testns",
UID: types.UID("pvc-uid"),
ResourceVersion: resourceVersion,
SelfLink: testapi.Default.SelfLink("pvc", name),
Annotations: map[string]string{},
},
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
@ -402,10 +446,14 @@ func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion s
},
},
StorageClassName: className,
VolumeMode: &fs,
},
}
switch pvcBoundState {
case pvcSelectedNode:
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annSelectedNode, node)
// don't fallthrough
case pvcBound:
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annBindCompleted, "yes")
fallthrough
@ -416,6 +464,7 @@ func makeTestPVC(name, size string, pvcBoundState int, pvName, resourceVersion s
}
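makeTestPVC drives PVC state through annotations: the new pvcSelectedNode case sets annSelectedNode and deliberately does not fall through, while pvcBound sets annBindCompleted and falls through to the prebound behavior. A small sketch of the apimachinery helper doing the work; the annotation key values below are assumptions for illustration, only the helper's lazy map initialization is the point:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed annotation keys; the real constants live in the PV controller sources.
const (
	annSelectedNode  = "volume.kubernetes.io/selected-node" // assumed value
	annBindCompleted = "pv.kubernetes.io/bind-completed"    // assumed value
)

func main() {
	om := metav1.ObjectMeta{Name: "provisioned-pvc"} // Annotations is nil here
	// SetMetaDataAnnotation initializes the Annotations map if it is nil,
	// so callers may invoke it unconditionally.
	metav1.SetMetaDataAnnotation(&om, annSelectedNode, "node1")
	metav1.SetMetaDataAnnotation(&om, annBindCompleted, "yes")
	fmt.Println(om.Annotations)
}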
func makeBadPVC() *v1.PersistentVolumeClaim {
fs := v1.PersistentVolumeFilesystem
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "bad-pvc",
@ -431,11 +480,13 @@ func makeBadPVC() *v1.PersistentVolumeClaim {
},
},
StorageClassName: &waitClass,
VolumeMode: &fs,
},
}
}
func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume {
fs := v1.PersistentVolumeFilesystem
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -446,6 +497,10 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
v1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),
},
StorageClassName: className,
VolumeMode: &fs,
},
Status: v1.PersistentVolumeStatus{
Phase: v1.VolumeAvailable,
},
}
if node != "" {
@ -454,9 +509,12 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
if boundToPVC != nil {
pv.Spec.ClaimRef = &v1.ObjectReference{
Name: boundToPVC.Name,
Namespace: boundToPVC.Namespace,
UID: boundToPVC.UID,
Kind: boundToPVC.Kind,
APIVersion: boundToPVC.APIVersion,
ResourceVersion: boundToPVC.ResourceVersion,
Name: boundToPVC.Name,
Namespace: boundToPVC.Namespace,
UID: boundToPVC.UID,
}
metav1.SetMetaDataAnnotation(&pv.ObjectMeta, annBoundByController, "yes")
}
@ -464,6 +522,24 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
return pv
}
func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim {
newPVC := pvc.DeepCopy()
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, annSelectedNode, node)
return newPVC
}
func pvcSetEmptyAnnotations(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
newPVC := pvc.DeepCopy()
newPVC.Annotations = map[string]string{}
return newPVC
}
func pvRemoveClaimUID(pv *v1.PersistentVolume) *v1.PersistentVolume {
newPV := pv.DeepCopy()
newPV.Spec.ClaimRef.UID = ""
return newPV
}
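These helpers follow a copy-then-mutate pattern: DeepCopy first, then change only the copy, so shared fixtures like provisionedPVC stay pristine across test cases. A minimal illustration of why the mutation must target the copy and never the input:

package main

import "fmt"

type claim struct {
	name        string
	annotations map[string]string
}

func (c *claim) deepCopy() *claim {
	out := &claim{name: c.name, annotations: map[string]string{}}
	for k, v := range c.annotations {
		out.annotations[k] = v
	}
	return out
}

// withAnnotation mutates the copy, not the input, so the caller's fixture is
// untouched.
func withAnnotation(c *claim, key, val string) *claim {
	nc := c.deepCopy()
	nc.annotations[key] = val
	return nc
}

func main() {
	base := &claim{name: "provisioned-pvc", annotations: map[string]string{}}
	derived := withAnnotation(base, "selected-node", "node1")
	fmt.Println(len(base.annotations), len(derived.annotations)) // 0 1
}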
func makePod(pvcs []*v1.PersistentVolumeClaim) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -515,7 +591,7 @@ func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *bindin
func addProvisionAnn(pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
res := pvc.DeepCopy()
// Add provision related annotations
res.Annotations[annSelectedNode] = nodeLabelValue
metav1.SetMetaDataAnnotation(&res.ObjectMeta, annSelectedNode, nodeLabelValue)
return res
}
@ -568,10 +644,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
shouldFail: true,
},
"prebound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedUnbound: true,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1aBound},
shouldFail: true,
},
"unbound-pvc,pv-same-node": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
@ -621,11 +696,9 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
expectedBound: true,
},
"one-prebound,one-unbound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{binding1a},
expectedUnbound: true,
expectedBound: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
shouldFail: true,
},
"immediate-bound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC},
@ -660,10 +733,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
},
}
// Set feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
@ -674,7 +743,7 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
}
for name, scenario := range scenarios {
glog.V(5).Infof("Running test case %q", name)
klog.V(5).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@ -776,10 +845,6 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
},
}
// Set VolumeScheduling and DynamicProvisioningScheduling feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true,DynamicProvisioningScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false,DynamicProvisioningScheduling=false")
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
@ -834,74 +899,69 @@ func TestAssumePodVolumes(t *testing.T) {
provisionedPVCs []*v1.PersistentVolumeClaim
// Expected return values
shouldFail bool
expectedBindingRequired bool
expectedAllBound bool
shouldFail bool
expectedAllBound bool
// if nil, use bindings
expectedBindings []*bindingInfo
expectedBindings []*bindingInfo
expectedProvisionings []*v1.PersistentVolumeClaim
}{
"all-bound": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedAllBound: true,
},
"prebound-pvc": {
podPVCs: []*v1.PersistentVolumeClaim{preboundPVC},
pvs: []*v1.PersistentVolume{pvNode1a},
},
"one-binding": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*bindingInfo{binding1aBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
},
"two-bindings": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*bindingInfo{binding1aBound, binding1bBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
},
"pv-already-bound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1aBound},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedBindingRequired: false,
expectedBindings: []*bindingInfo{},
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1aBound},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedBindings: []*bindingInfo{binding1aBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
},
"claimref-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, bindingBad},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
shouldFail: true,
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, bindingBad},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
shouldFail: true,
},
"tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a},
shouldFail: true,
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*bindingInfo{binding1a, binding1b},
pvs: []*v1.PersistentVolume{pvNode1a},
shouldFail: true,
},
"one-binding, one-pvc-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedBindings: []*bindingInfo{binding1aBound},
expectedProvisionings: []*v1.PersistentVolumeClaim{selectedNodePVC},
},
"one-binding, one-provision-tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
shouldFail: true,
expectedBindingRequired: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*bindingInfo{binding1a},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
shouldFail: true,
},
}
for name, scenario := range scenarios {
glog.V(5).Infof("Running test case %q", name)
klog.V(5).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@ -911,7 +971,7 @@ func TestAssumePodVolumes(t *testing.T) {
testEnv.initVolumes(scenario.pvs, scenario.pvs)
// Execute
allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, "node1")
allBound, err := testEnv.binder.AssumePodVolumes(pod, "node1")
// Validate
if !scenario.shouldFail && err != nil {
@ -920,24 +980,25 @@ func TestAssumePodVolumes(t *testing.T) {
if scenario.shouldFail && err == nil {
t.Errorf("Test %q failed: returned success but expected error", name)
}
if scenario.expectedBindingRequired != bindingRequired {
t.Errorf("Test %q failed: returned unexpected bindingRequired: %v", name, bindingRequired)
}
if scenario.expectedAllBound != allBound {
t.Errorf("Test %q failed: returned unexpected allBound: %v", name, allBound)
}
if scenario.expectedBindings == nil {
scenario.expectedBindings = scenario.bindings
}
if scenario.shouldFail {
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
} else {
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.provisionedPVCs)
if scenario.expectedProvisionings == nil {
scenario.expectedProvisionings = scenario.provisionedPVCs
}
if scenario.shouldFail {
testEnv.validateFailedAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
} else {
testEnv.validateAssume(t, name, pod, scenario.expectedBindings, scenario.expectedProvisionings)
}
testEnv.validatePodCache(t, name, pod.Spec.NodeName, pod, scenario.expectedBindings, scenario.expectedProvisionings)
}
}
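TestAssumePodVolumes, like the other tests in this file, is table-driven: a map from scenario name to an inputs-plus-expectations struct, executed by one loop. A compact, self-contained sketch of the idiom:

package main

import "testing"

// checkAllBound is a trivial stand-in for the logic under test.
func checkAllBound(bindings []string) bool { return len(bindings) == 0 }

func TestCheckAllBound(t *testing.T) {
	scenarios := map[string]struct {
		bindings         []string
		expectedAllBound bool
	}{
		"all-bound":   {expectedAllBound: true},
		"one-binding": {bindings: []string{"pvc-a->pv-node1a"}},
	}
	for name, scenario := range scenarios {
		if got := checkAllBound(scenario.bindings); got != scenario.expectedAllBound {
			t.Errorf("Test %q failed: returned unexpected allBound: %v", name, got)
		}
	}
}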
func TestBindPodVolumes(t *testing.T) {
func TestBindAPIUpdate(t *testing.T) {
scenarios := map[string]struct {
// Inputs
bindings []*bindingInfo
@ -960,34 +1021,56 @@ func TestBindPodVolumes(t *testing.T) {
// if nil, use expectedPVCs
expectedAPIPVCs []*v1.PersistentVolumeClaim
}{
"all-bound": {},
"not-fully-bound": {
bindings: []*bindingInfo{},
"nothing-to-bind-nil": {
shouldFail: true,
},
"nothing-to-bind-bindings-nil": {
provisionedPVCs: []*v1.PersistentVolumeClaim{},
shouldFail: true,
},
"nothing-to-bind-provisionings-nil": {
bindings: []*bindingInfo{},
shouldFail: true,
},
"nothing-to-bind-empty": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"one-binding": {
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"two-bindings": {
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"api-already-updated": {
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
},
"api-update-failed": {
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
apiPVs: []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
shouldFail: true,
bindings: []*bindingInfo{binding1aBound, binding1bBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
apiPVs: []*v1.PersistentVolume{pvNode1a, pvNode1bBoundHigherVersion},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1b},
expectedAPIPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBoundHigherVersion},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
shouldFail: true,
},
"one-provisioned-pvc": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
},
"provision-api-update-failed": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), addProvisionAnn(provisionedPVC2)},
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVC2},
apiPVCs: []*v1.PersistentVolumeClaim{provisionedPVC, provisionedPVCHigherVersion},
@ -995,7 +1078,7 @@ func TestBindPodVolumes(t *testing.T) {
expectedAPIPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC), provisionedPVCHigherVersion},
shouldFail: true,
},
"bingding-succeed, provision-api-update-failed": {
"binding-succeed, provision-api-update-failed": {
bindings: []*bindingInfo{binding1aBound},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
expectedPVs: []*v1.PersistentVolume{pvNode1aBound},
@ -1008,7 +1091,7 @@ func TestBindPodVolumes(t *testing.T) {
},
}
for name, scenario := range scenarios {
glog.V(5).Infof("Running test case %q", name)
klog.V(4).Infof("Running test case %q", name)
// Setup
testEnv := newTestBinder(t)
@ -1024,7 +1107,7 @@ func TestBindPodVolumes(t *testing.T) {
testEnv.assumeVolumes(t, name, "node1", pod, scenario.bindings, scenario.provisionedPVCs)
// Execute
err := testEnv.binder.BindPodVolumes(pod)
err := testEnv.internalBinder.bindAPIUpdate(pod.Name, scenario.bindings, scenario.provisionedPVCs)
// Validate
if !scenario.shouldFail && err != nil {
@ -1044,11 +1127,302 @@ func TestBindPodVolumes(t *testing.T) {
}
}
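The api-update-failed scenarios above pin down an important property of bindAPIUpdate: updates are issued in order, so a failure partway through leaves earlier objects updated in the API server while later ones are not (hence expectedAPIPVs mixing bound and unbound states). A hedged sketch of that ordered-update behavior, with updateFn standing in for the real client calls:

package main

import (
	"errors"
	"fmt"
)

// bindAPIUpdateSketch applies updates one by one and stops at the first
// failure, leaving earlier updates in place.
func bindAPIUpdateSketch(objects []string, updateFn func(string) error) error {
	for _, obj := range objects {
		if err := updateFn(obj); err != nil {
			return fmt.Errorf("failed to update %q: %v", obj, err)
		}
	}
	return nil
}

func main() {
	var updated []string
	err := bindAPIUpdateSketch([]string{"pv-node1a", "pv-node1b"}, func(name string) error {
		if name == "pv-node1b" {
			return errors.New("conflict: object has been modified") // simulated version conflict
		}
		updated = append(updated, name)
		return nil
	})
	fmt.Println(updated, err) // [pv-node1a] failed to update "pv-node1b": ...
}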
func TestFindAssumeVolumes(t *testing.T) {
// Set feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
func TestCheckBindings(t *testing.T) {
scenarios := map[string]struct {
// Inputs
bindings []*bindingInfo
cachedPVs []*v1.PersistentVolume
provisionedPVCs []*v1.PersistentVolumeClaim
cachedPVCs []*v1.PersistentVolumeClaim
// Expected return values
shouldFail bool
expectedBound bool
}{
"nothing-to-bind-nil": {
shouldFail: true,
},
"nothing-to-bind-bindings-nil": {
provisionedPVCs: []*v1.PersistentVolumeClaim{},
shouldFail: true,
},
"nothing-to-bind-provisionings-nil": {
bindings: []*bindingInfo{},
shouldFail: true,
},
"nothing-to-bind": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
expectedBound: true,
},
"binding-bound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
expectedBound: true,
},
"binding-prebound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{preboundPVCNode1a},
},
"binding-unbound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
},
"binding-pvc-not-exists": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
shouldFail: true,
},
"binding-pv-not-exists": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
shouldFail: true,
},
"binding-claimref-nil": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1a},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
shouldFail: true,
},
"binding-claimref-uid-empty": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvRemoveClaimUID(pvNode1aBound)},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a},
shouldFail: true,
},
"binding-one-bound,one-unbound": {
bindings: []*bindingInfo{binding1aBound, binding1bBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound, pvNode1bBound},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a, unboundPVC2},
},
"provisioning-pvc-bound": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVCBound)},
expectedBound: true,
},
"provisioning-pvc-unbound": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
},
"provisioning-pvc-not-exists": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
shouldFail: true,
},
"provisioning-pvc-annotations-nil": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
shouldFail: true,
},
"provisioning-pvc-selected-node-dropped": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{pvcSetEmptyAnnotations(provisionedPVC)},
shouldFail: true,
},
"provisioning-pvc-selected-node-wrong-node": {
bindings: []*bindingInfo{},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVCs: []*v1.PersistentVolumeClaim{pvcSetSelectedNode(provisionedPVC, "wrong-node")},
shouldFail: true,
},
"binding-bound-provisioning-unbound": {
bindings: []*bindingInfo{binding1aBound},
provisionedPVCs: []*v1.PersistentVolumeClaim{addProvisionAnn(provisionedPVC)},
cachedPVs: []*v1.PersistentVolume{pvNode1aBound},
cachedPVCs: []*v1.PersistentVolumeClaim{boundPVCNode1a, addProvisionAnn(provisionedPVC)},
},
}
for name, scenario := range scenarios {
klog.V(4).Infof("Running test case %q", name)
// Setup
pod := makePod(nil)
testEnv := newTestBinder(t)
testEnv.initVolumes(scenario.cachedPVs, nil)
testEnv.initClaims(scenario.cachedPVCs, nil)
// Execute
allBound, err := testEnv.internalBinder.checkBindings(pod, scenario.bindings, scenario.provisionedPVCs)
// Validate
if !scenario.shouldFail && err != nil {
t.Errorf("Test %q failed: returned error: %v", name, err)
}
if scenario.shouldFail && err == nil {
t.Errorf("Test %q failed: returned success but expected error", name)
}
if scenario.expectedBound != allBound {
t.Errorf("Test %q failed: returned bound %v", name, allBound)
}
}
}
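Read together, these scenarios imply a three-way contract for checkBindings: an error for states that can never converge (missing PVC or PV, nil ClaimRef, empty ClaimRef UID, dropped or mismatched selected-node annotation), false for not-yet-bound, and true once every binding and provisioning is observed complete. A hedged, single-claim sketch of that contract:

package main

import (
	"errors"
	"fmt"
)

// pvcState is a simplified stand-in for what the binder reads from its
// informer caches.
type pvcState struct {
	exists       bool
	boundPVName  string
	selectedNode string
}

// checkOne returns (bound, err) following the assumed three-way contract.
func checkOne(c pvcState, wantNode string) (bool, error) {
	if !c.exists {
		return false, errors.New("pvc not found in cache") // can never converge
	}
	if c.selectedNode != "" && c.selectedNode != wantNode {
		return false, errors.New("selected-node annotation points elsewhere")
	}
	return c.boundPVName != "", nil // false just means "keep waiting"
}

func main() {
	bound, err := checkOne(pvcState{exists: true, selectedNode: "node1"}, "node1")
	fmt.Println(bound, err) // false <nil>: not yet bound, retry later
}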
func TestBindPodVolumes(t *testing.T) {
scenarios := map[string]struct {
// Inputs
// These tests only support a single PV and PVC, and only static binding
bindingsNil bool // Pass in nil bindings slice
binding *bindingInfo
cachedPV *v1.PersistentVolume
cachedPVC *v1.PersistentVolumeClaim
apiPV *v1.PersistentVolume
// This function runs with a delay of 5 seconds
delayFunc func(*testing.T, *testEnv, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim)
// Expected return values
shouldFail bool
}{
"nothing-to-bind-nil": {
bindingsNil: true,
shouldFail: true,
},
"nothing-to-bind-empty": {},
"already-bound": {
binding: binding1aBound,
cachedPV: pvNode1aBound,
cachedPVC: boundPVCNode1a,
},
"binding-succeeds-after-time": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// Update PVC to be fully bound to PV
newPVC := pvc.DeepCopy()
newPVC.ResourceVersion = "100"
newPVC.Spec.VolumeName = pv.Name
metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, annBindCompleted, "yes")
// Update pvc cache, fake client doesn't invoke informers
internalBinder, ok := testEnv.binder.(*volumeBinder)
if !ok {
t.Fatalf("Failed to convert to internal binder")
}
pvcCache := internalBinder.pvcCache
internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to convert to internal PVC cache")
}
internalPVCCache.add(newPVC)
},
},
"pod-deleted-after-time": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
bindingsCache := testEnv.binder.GetBindingsCache()
if bindingsCache == nil {
t.Fatalf("Failed to get bindings cache")
}
// Delete the pod from the cache
bindingsCache.DeleteBindings(pod)
// Check that it's deleted
bindings := bindingsCache.GetBindings(pod, "node1")
if bindings != nil {
t.Fatalf("Failed to delete bindings")
}
},
shouldFail: true,
},
"binding-times-out": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
shouldFail: true,
},
"binding-fails": {
binding: binding1bBound,
cachedPV: pvNode1b,
apiPV: pvNode1bBoundHigherVersion,
cachedPVC: unboundPVC2,
shouldFail: true,
},
"check-fails": {
binding: binding1aBound,
cachedPV: pvNode1a,
cachedPVC: unboundPVC,
delayFunc: func(t *testing.T, testEnv *testEnv, pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// Delete PVC
// Update pvc cache, fake client doesn't invoke informers
internalBinder, ok := testEnv.binder.(*volumeBinder)
if !ok {
t.Fatalf("Failed to convert to internal binder")
}
pvcCache := internalBinder.pvcCache
internalPVCCache, ok := pvcCache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to convert to internal PVC cache")
}
internalPVCCache.delete(pvc)
},
shouldFail: true,
},
}
for name, scenario := range scenarios {
klog.V(4).Infof("Running test case %q", name)
// Setup
pod := makePod(nil)
if scenario.apiPV == nil {
scenario.apiPV = scenario.cachedPV
}
testEnv := newTestBinder(t)
if !scenario.bindingsNil {
if scenario.binding != nil {
testEnv.initVolumes([]*v1.PersistentVolume{scenario.cachedPV}, []*v1.PersistentVolume{scenario.apiPV})
testEnv.initClaims([]*v1.PersistentVolumeClaim{scenario.cachedPVC}, nil)
testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{scenario.binding}, []*v1.PersistentVolumeClaim{})
} else {
testEnv.assumeVolumes(t, name, "node1", pod, []*bindingInfo{}, []*v1.PersistentVolumeClaim{})
}
}
if scenario.delayFunc != nil {
go func() {
time.Sleep(5 * time.Second)
klog.V(5).Infof("Running delay function")
scenario.delayFunc(t, testEnv, pod, scenario.binding.pv, scenario.binding.pvc)
}()
}
// Execute
err := testEnv.binder.BindPodVolumes(pod)
// Validate
if !scenario.shouldFail && err != nil {
t.Errorf("Test %q failed: returned error: %v", name, err)
}
if scenario.shouldFail && err == nil {
t.Errorf("Test %q failed: returned success but expected error", name)
}
}
}
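The delayFunc goroutines and the 10*time.Second passed to NewVolumeBinder in newTestBinder imply that BindPodVolumes, after issuing its API updates, waits for the informer caches to observe the volumes as bound, failing on timeout. A sketch of that bind-then-wait shape using the real k8s.io/apimachinery wait.Poll helper; checkBound stands in for the actual cache check:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// bindThenWait polls until checkBound reports true, returns early on a
// permanent error, and gives up after timeout (wait.ErrWaitTimeout).
func bindThenWait(checkBound func() (bool, error), timeout time.Duration) error {
	return wait.Poll(500*time.Millisecond, timeout, func() (bool, error) {
		return checkBound()
	})
}

func main() {
	start := time.Now()
	err := bindThenWait(func() (bool, error) {
		// Simulates a PVC that becomes bound after roughly two seconds.
		return time.Since(start) > 2*time.Second, nil
	}, 10*time.Second)
	fmt.Println(err) // <nil>: observed bound before the timeout
}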
func TestFindAssumeVolumes(t *testing.T) {
// Test case
podPVCs := []*v1.PersistentVolumeClaim{unboundPVC}
pvs := []*v1.PersistentVolume{pvNode2, pvNode1a, pvNode1c}
@ -1080,17 +1454,15 @@ func TestFindAssumeVolumes(t *testing.T) {
expectedBindings := testEnv.getPodBindings(t, "before-assume", testNode.Name, pod)
// 2. Assume matches
allBound, bindingRequired, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
allBound, err := testEnv.binder.AssumePodVolumes(pod, testNode.Name)
if err != nil {
t.Errorf("Test failed: AssumePodVolumes returned error: %v", err)
}
if allBound {
t.Errorf("Test failed: detected unbound volumes as bound")
}
if !bindingRequired {
t.Errorf("Test failed: binding not required")
}
testEnv.validateAssume(t, "assume", pod, expectedBindings, nil)
// After assume, claimref should be set on pv
expectedBindings = testEnv.getPodBindings(t, "after-assume", testNode.Name, pod)
@ -1106,6 +1478,6 @@ func TestFindAssumeVolumes(t *testing.T) {
if !unboundSatisfied {
t.Errorf("Test failed: couldn't find PVs for all PVCs")
}
testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, nil)
testEnv.validatePodCache(t, "after-assume", testNode.Name, pod, expectedBindings, []*v1.PersistentVolumeClaim{})
}
}

View File

@ -25,8 +25,9 @@ import (
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
cloudprovider "k8s.io/cloud-provider"
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
vol "k8s.io/kubernetes/pkg/volume"
)
@ -79,10 +80,6 @@ func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Inte
return nil
}
func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
return nil
}
func (ctrl *PersistentVolumeController) GetHostName() string {
return ""
}
@ -113,6 +110,12 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s
}
}
func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController")
}
}
func (adc *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
@ -128,3 +131,8 @@ func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
return ctrl.eventRecorder
}
func (ctrl *PersistentVolumeController) GetCSIClient() csiclientset.Interface {
// No volume plugin needs csi.storage.k8s.io client in PV controller.
return nil
}
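The tail of this diff shows PersistentVolumeController satisfying a broad volume-host interface largely with stubs: GetCSIClient returns nil because no plugin needs it here, and DeleteServiceAccountTokenFunc returns a log-and-ignore closure. A simplified sketch of that stubbing pattern; the interface and method set below are stand-ins, not the real VolumeHost surface:

package main

import "fmt"

// host is a reduced stand-in for the broad interface the controller must satisfy.
type host interface {
	GetHostName() string
	DeleteServiceAccountTokenFunc() func(uid string)
}

type controller struct{}

// GetHostName is meaningless off-node, so the controller stubs it out.
func (c *controller) GetHostName() string { return "" }

// DeleteServiceAccountTokenFunc returns a closure that only logs, since the
// controller never mints tokens and deleting them is unsupported by design.
func (c *controller) DeleteServiceAccountTokenFunc() func(uid string) {
	return func(uid string) {
		fmt.Println("DeleteServiceAccountToken unsupported in this controller")
	}
}

func main() {
	var h host = &controller{}
	h.DeleteServiceAccountTokenFunc()("uid-123")
}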