vendor cleanup: remove unused, non-go and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions
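The commit prunes the vendor tree down to buildable Go sources: test files, Bazel BUILD files, OWNERS files and other non-Go content are dropped, as the deleted files below show. The walker that follows is only a hypothetical sketch of that pruning rule (it assumes licence/notice files are kept and every other non-Go file is removed); it is not the tool used for this commit.

// prune_vendor.go - hypothetical sketch of the rule this commit applies under vendor/:
// drop *_test.go and any non-Go file, keeping licence/notice files.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// keep reports whether a vendored file should survive the cleanup.
func keep(name string) bool {
	lower := strings.ToLower(name)
	if strings.HasSuffix(lower, "_test.go") {
		return false
	}
	if strings.HasSuffix(lower, ".go") {
		return true
	}
	// Assumed exception: legal files stay even though they are not Go sources.
	for _, k := range []string{"license", "licence", "notice", "copying", "patents"} {
		if strings.Contains(lower, k) {
			return true
		}
	}
	return false
}

func main() {
	err := filepath.Walk("vendor", func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		if !keep(info.Name()) {
			fmt.Println("removing", path)
			return os.Remove(path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}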

View File

@@ -1,126 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"index.go",
"pv_controller.go",
"pv_controller_base.go",
"scheduler_assume_cache.go",
"scheduler_binder.go",
"scheduler_binder_cache.go",
"scheduler_binder_fake.go",
"volume_host.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/events:go_default_library",
"//pkg/controller/volume/persistentvolume/metrics:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/goroutinemap:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/authentication/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"binder_test.go",
"delete_test.go",
"framework_test.go",
"index_test.go",
"provision_test.go",
"pv_controller_test.go",
"recycle_test.go",
"scheduler_assume_cache_test.go",
"scheduler_binder_cache_test.go",
"scheduler_binder_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/tools/reference:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/volume/persistentvolume/metrics:all-srcs",
"//pkg/controller/volume/persistentvolume/options:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -1,5 +0,0 @@
approvers:
- jsafrane
- saad-ali
- thockin
- msau42 # for volume scheduling

View File

@@ -1,865 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"testing"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
// Test single call to syncClaim and syncVolume methods.
// 1. Fill in the controller with initial data
// 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims.
func TestSync(t *testing.T) {
labels := map[string]string{
"foo": "true",
"bar": "false",
}
modeBlock := v1.PersistentVolumeBlock
modeFile := v1.PersistentVolumeFilesystem
tests := []controllerTest{
// [Unit test set 1] User did not care which PV they get.
// Test the matching with no claim.Spec.VolumeName and with various
// volumes.
{
// syncClaim binds to a matching unbound volume.
"1-1 - successful bind",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not do anything when there is no matching volume.
"1-2 - noop",
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// syncClaim resets claim.Status to Pending when there is no
// matching volume.
"1-3 - reset to Pending",
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimBound, nil),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// syncClaim binds claims to the smallest matching volume
"1-4 - smallest volume",
[]*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
},
newClaimArray("claim1-4", "uid1-4", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only to volume that points to it (by
// name), even though a smaller one is available.
"1-5 - prebound volume by name - success",
[]*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-5", "uid1-5", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only to volume that points to it (by
// UID), even though a smaller one is available.
"1-6 - prebound volume by UID - success",
[]*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-6", "uid1-6", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind claim to a volume prebound to a claim with
// same name and different UID
"1-7 - prebound volume to different claim",
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PV.ClaimRef is saved
"1-8 - complete bind after crash - PV bound",
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-8", "uid1-8", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PV.Status is saved
"1-9 - complete bind after crash - PV status saved",
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-9", "uid1-9", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PVC.VolumeName is saved
"1-10 - complete bind after crash - PVC bound",
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only when the label selector matches the volume
"1-11 - bind when selector matches",
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind a claim when the label selector doesn't match
"1-12 - do not bind when selector does not match",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// syncClaim does not do anything when binding is delayed
"1-13 - delayed binding",
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
[]string{"Normal WaitForFirstConsumer"},
noerrors, testSyncClaim,
},
{
// syncClaim binds when binding is delayed but PV is prebound to PVC
"1-14 - successful prebound PV",
newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, &classWait, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds pre-bound PVC only to the volume it points to,
// even if there is smaller volume available
"1-15 - successful prebound PVC",
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "uid1-15", "claim1-15", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimBound, nil, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind pre-bound PVC to PV with different AccessMode
"1-16 - successful prebound PVC",
// PV has ReadWriteOnce
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
noevents, noerrors, testSyncClaim,
},
// [Unit test set 2] User asked for a specific PV.
// Test the binding when pv.ClaimRef is already set by controller or
// by user.
{
// syncClaim with claim pre-bound to a PV that does not exist
"2-1 - claim prebound to non-existing volume - noop",
novolumes,
novolumes,
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", v1.ClaimPending, nil),
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", v1.ClaimPending, nil),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that does not exist.
// Check that the claim status is reset to Pending
"2-2 - claim prebound to non-existing volume - reset status",
novolumes,
novolumes,
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", v1.ClaimBound, nil),
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", v1.ClaimPending, nil),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound. Check it gets bound and no annBoundByController is set.
"2-3 - claim prebound to unbound volume",
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending, nil),
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, nil, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// claim with claim pre-bound to a PV that is pre-bound to the claim
// by name. Check it gets bound and no annBoundByController is set.
"2-4 - claim prebound to prebound volume by name",
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimPending, nil),
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimBound, nil, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that is pre-bound to the
// claim by UID. Check it gets bound and no annBoundByController is
// set.
"2-5 - claim prebound to prebound volume by UID",
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimPending, nil),
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimBound, nil, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that is bound to different
// claim. Check it's reset to Pending.
"2-6 - claim prebound to already bound volume",
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", v1.ClaimBound, nil),
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", v1.ClaimPending, nil),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound by controller to a PV that is bound to
// different claim. Check it throws an error.
"2-7 - claim bound by controller to already bound volume",
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", v1.ClaimBound, nil, annBoundByController),
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", v1.ClaimBound, nil, annBoundByController),
noevents, noerrors, testSyncClaimError,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound, but does not match the selector. Check it gets bound
// and no annBoundByController is set.
"2-8 - claim prebound to unbound volume that does not match the selector",
newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-8", "1Gi", "uid2-8", "claim2-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimPending, nil)),
withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimBound, nil, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound, but its size is smaller than requested.
//Check that the claim status is reset to Pending
"2-9 - claim prebound to unbound volume that size is smaller than requested",
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimBound, nil),
newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimPending, nil),
[]string{"Warning VolumeMismatch"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound, but its class does not match. Check that the claim status is reset to Pending
"2-10 - claim prebound to unbound volume that class is different",
newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimBound, nil),
newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimPending, nil),
[]string{"Warning VolumeMismatch"}, noerrors, testSyncClaim,
},
// [Unit test set 3] Syncing bound claim
{
// syncClaim with claim bound and its claim.Spec.VolumeName is
// removed. Check it's marked as Lost.
"3-1 - bound claim with missing VolumeName",
novolumes,
novolumes,
newClaimArray("claim3-1", "uid3-1", "10Gi", "", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
newClaimArray("claim3-1", "uid3-1", "10Gi", "", v1.ClaimLost, nil, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to non-existing volume. Check it's
// marked as Lost.
"3-2 - bound claim with missing volume",
novolumes,
novolumes,
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", v1.ClaimLost, nil, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to unbound volume. Check it's bound.
// Also check that Pending phase is set to Bound
"3-3 - bound claim with unbound volume",
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to volume with missing (or different)
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
"3-4 - bound claim with prebound volume",
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimPending, nil, annBoundByController, annBindCompleted),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimLost, nil, annBoundByController, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to bound volume. Check that the
// controller does not do anything. Also check that Pending phase is
// set to Bound
"3-5 - bound claim with bound volume",
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimPending, nil, annBindCompleted),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimBound, nil, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to a volume that is bound to different
// claim. Check that the claim is marked as lost.
// TODO: test that an event is emitted
"3-6 - bound claim with bound volume",
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimPending, nil, annBindCompleted),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimLost, nil, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to unbound volume. Check it's bound
// even if the claim's selector doesn't match the volume. Also
// check that Pending phase is set to Bound
"3-7 - bound claim with unbound volume where selector doesn't match",
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted)),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
// [Unit test set 4] All syncVolume tests.
{
// syncVolume with pending volume. Check it's marked as Available.
"4-1 - pending volume",
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with prebound pending volume. Check it's marked as
// Available.
"4-2 - pending prebound volume",
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to missing claim.
// Check the volume gets Released
"4-3 - bound volume with missing claim",
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to claim with different UID.
// Check the volume gets Released.
"4-4 - volume bound to claim with different UID",
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", v1.ClaimBound, nil, annBindCompleted),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", v1.ClaimBound, nil, annBindCompleted),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by controller to unbound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending, nil),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by user to bound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending, nil),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to bound claim.
// Check that the volume is marked as Bound.
"4-6 - volume bound by to bound claim",
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", v1.ClaimBound, nil),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", v1.ClaimBound, nil),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to claim bound to
// another volume. Check that the volume is rolled back.
"4-7 - volume bound by controller to claim bound somewhere else",
newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume4-7", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", v1.ClaimBound, nil),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", v1.ClaimBound, nil),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to claim bound to
// another volume. Check that the volume is marked as Available
// and its UID is reset.
"4-8 - volume bound by user to claim bound somewhere else",
newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume4-8", "10Gi", "", "claim4-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", v1.ClaimBound, nil),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", v1.ClaimBound, nil),
noevents, noerrors, testSyncVolume,
},
// PVC with class
{
// syncVolume binds a claim to requested class even if there is a
// smaller PV available
"13-1 - binding to class",
[]*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-1-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
},
[]*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
},
newClaimArray("claim13-1", "uid13-1", "1Gi", "", v1.ClaimPending, &classGold),
withExpectedCapacity("10Gi", newClaimArray("claim13-1", "uid13-1", "1Gi", "volume13-1-2", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a claim without a class even if there is a
// smaller PV with a class available
"13-2 - binding without a class",
[]*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-2-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
},
newClaimArray("claim13-2", "uid13-2", "1Gi", "", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim13-2", "uid13-2", "1Gi", "volume13-2-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a claim with given class even if there is a
// smaller PV with different class available
"13-3 - binding to specific a class",
[]*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver),
newVolume("volume13-3-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
},
[]*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver),
newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
},
newClaimArray("claim13-3", "uid13-3", "1Gi", "", v1.ClaimPending, &classGold),
withExpectedCapacity("10Gi", newClaimArray("claim13-3", "uid13-3", "1Gi", "volume13-3-2", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds claim requesting class "" to claim to PV with
// class=""
"13-4 - empty class",
newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim13-4", "uid13-4", "1Gi", "", v1.ClaimPending, &classEmpty),
newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", v1.ClaimBound, &classEmpty, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds claim requesting class nil to claim to PV with
// class = ""
"13-5 - nil class",
newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim13-5", "uid13-5", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
// All of these should bind as feature set is not enabled for BlockVolume
// meaning volumeMode will be ignored and dropped
{
// syncVolume binds a requested block claim to a block volume
"14-1 - binding to volumeMode block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to a filesystem volume
"14-2 - binding to volumeMode filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds an unspecified volumemode for claim to a specified filesystem volume
"14-3 - binding to volumeMode filesystem using default for claim",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-4 - binding to unspecified volumeMode using requested filesystem for claim",
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
"14-5 - binding different volumeModes should be ignored",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
}
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
runSyncTests(t, tests, []*storage.StorageClass{
{
ObjectMeta: metav1.ObjectMeta{Name: classWait},
VolumeBindingMode: &modeWait,
},
}, []*v1.Pod{})
}
func TestSyncAlphaBlockVolume(t *testing.T) {
modeBlock := v1.PersistentVolumeBlock
modeFile := v1.PersistentVolumeFilesystem
// Tests assume defaulting, so feature enabled will never have nil volumeMode
tests := []controllerTest{
// PVC with VolumeMode
{
// syncVolume binds a requested block claim to a block volume
"14-1 - binding to volumeMode block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a requested filesystem claim to a filesystem volume
"14-2 - binding to volumeMode filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind to an unspecified volumemode for claim to a specified filesystem volume
"14-3 - do not bind pv volumeMode filesystem and pvc volumeMode block",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind a requested filesystem claim to an unspecified volumeMode for volume
"14-4 - do not bind pv volumeMode block and pvc volumeMode filesystem",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind when matching class but not matching volumeModes
"14-5 - do not bind when matching class but not volumeMode",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
[]string{"Warning ProvisioningFailed"},
noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind when matching volumeModes but class does not match
"14-5-1 - do not bind when matching volumeModes but class does not match",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
[]string{"Warning ProvisioningFailed"},
noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind when pvc is prebound to pv with matching volumeModes but class does not match
"14-5-2 - do not bind when pvc is prebound to pv with matching volumeModes but class does not match",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
[]string{"Warning VolumeMismatch"},
noerrors, testSyncClaim,
},
{
// syncVolume bind when pv is prebound and volumeModes match
"14-7 - bind when pv volume is prebound and volumeModes match",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "uid14-7", "claim14-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "volume14-7", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
"14-8 - do not bind when pvc is prebound to pv with mismatching volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
noerrors, testSyncClaim,
},
{
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
"14-8-1 - do not bind when pvc is prebound to pv with mismatching volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Normal FailedBinding"},
noerrors, testSyncClaim,
},
{
// syncVolume binds when pvc is prebound to pv with matching volumeModes block
"14-9 - bind when pvc is prebound to pv with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "uid14-9", "claim14-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimBound, nil, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds when pv is prebound to pvc with matching volumeModes block
"14-10 - bind when pv is prebound to pvc with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "uid14-10", "claim14-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "volume14-10", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds when pvc is prebound to pv with matching volumeModes filesystem
"14-11 - bind when pvc is prebound to pv with matching volumeModes filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "uid14-11", "claim14-11", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimBound, nil, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds when pv is prebound to pvc with matching volumeModes filesystem
"14-12 - bind when pv is prebound to pvc with matching volumeModes filesystem",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "uid14-12", "claim14-12", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "volume14-12", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume output warning when pv is prebound to pvc with mismatching volumeMode
"14-13 - output warning when pv is prebound to pvc with different volumeModes",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
noerrors, testSyncVolume,
},
{
// syncVolume output warning when pv is prebound to pvc with mismatching volumeMode
"14-13-1 - output warning when pv is prebound to pvc with different volumeModes",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)),
[]string{"Warning VolumeMismatch"},
noerrors, testSyncVolume,
},
{
// syncVolume waits for synClaim without warning when pv is prebound to pvc with matching volumeMode block
"14-14 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes block",
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume waits for synClaim without warning when pv is prebound to pvc with matching volumeMode file
"14-14-1 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes file",
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)),
noevents, noerrors, testSyncVolume,
},
}
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
if err != nil {
t.Errorf("Failed to enable feature gate for BlockVolume: %v", err)
return
}
defer utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls is enforced to prevent endless loops.
func TestMultiSync(t *testing.T) {
tests := []controllerTest{
// Test simple binding
{
// syncClaim binds to a matching unbound volume.
"10-1 - successful bind",
newVolumeArray("volume10-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim10-1", "uid10-1", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// Two controllers bound two PVs to single claim. Test one of them
// wins and the second rolls back.
"10-2 - bind PV race",
[]*v1.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
},
[]*v1.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolume("volume10-2-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
}
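// --- Illustrative aside (not part of the original file) ---
// A minimal, standalone sketch of the bounded fixed-point loop described in the
// comment above TestMultiSync: keep re-running the sync step while it reports
// changes, and stop after a fixed number of iterations so a buggy controller
// cannot loop forever. The names (syncAll, maxIterations) are assumptions for
// illustration, not identifiers from the real test framework.
package main

import "fmt"

func main() {
	const maxIterations = 100 // safety cap, mirroring the "limit of calls" above
	remainingChanges := 3     // pretend the first three passes each change something

	// syncAll stands in for one pass of syncVolume/syncClaim over all objects;
	// it returns true while the simulated state keeps changing.
	syncAll := func() bool {
		if remainingChanges > 0 {
			remainingChanges--
			return true
		}
		return false
	}

	for i := 0; ; i++ {
		if i >= maxIterations {
			fmt.Println("giving up: possible endless loop")
			return
		}
		if !syncAll() {
			fmt.Printf("state settled after %d passes\n", i)
			return
		}
	}
}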


@ -1,228 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
)
// Test single call to syncVolume, expecting deleting to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestDeleteSync(t *testing.T) {
tests := []controllerTest{
{
// delete volume bound by controller
"8-1 - successful delete",
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annBoundByController),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete volume bound by user
"8-2 - successful delete with prebound volume",
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete failure - plugin not found
"8-3 - plugin not found",
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete, classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume,
},
{
// delete failure - newDeleter returns error
"8-4 - newDeleter returns error",
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete, classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume),
},
{
// delete failure - delete() returns error
"8-5 - delete returns error",
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
withMessage("Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete, classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume),
},
{
// delete success(?) - volume is deleted before doDelete() starts
"8-6 - volume is deleted before deleting",
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before delete operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume8-6")
reactor.lock.Unlock()
}),
},
{
// delete success(?) - volume is bound just at the time doDelete()
// starts. This simulates "volume no longer needs recycling,
// skipping".
"8-7 - volume is bound before deleting",
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annBoundByController),
noclaims,
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock()
defer reactor.lock.Unlock()
// Bind the volume to resurrected claim (this should never
// happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil)
reactor.claims[claim.Name] = claim
ctrl.claims.Add(claim)
volume := reactor.volumes["volume8-7"]
volume.Status.Phase = v1.VolumeBound
}),
},
{
// delete success - volume bound by user is deleted, while a new
// claim is created with another UID.
"8-9 - prebound volume is deleted while the claim exists",
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
novolumes,
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending, nil),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// PV requires external deleter
"8-10 - external deleter",
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annBoundByController),
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, classEmpty, annBoundByController),
noclaims,
noclaims,
noevents, noerrors,
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
// Inject external deleter annotation
test.initialVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
test.expectedVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
return testSyncVolume(ctrl, reactor, test)
},
},
{
// delete success - two PVs are provisioned for a single claim.
// One of the PVs is deleted.
"8-11 - two PVs provisioned for a single claim",
[]*v1.PersistentVolume{
newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
},
[]*v1.PersistentVolume{
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
},
// the claim is bound to volume8-11-2 -> volume8-11-1 has lost the race and will be deleted
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound, nil),
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound, nil),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
{
// delete success - two PVs are externally provisioned for a single
// claim. One of the PVs is marked as Released to be deleted by the
// external provisioner.
"8-12 - two PVs externally provisioned for a single claim",
[]*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
},
[]*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty, annDynamicallyProvisioned),
},
// the claim is bound to volume8-12-2 -> volume8-12-1 has lost the race and will be "Released"
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound, nil),
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound, nil),
noevents, noerrors,
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
// Inject external deleter annotation
test.initialVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
test.expectedVolumes[0].Annotations[annDynamicallyProvisioned] = "external.io/test"
return testSyncVolume(ctrl, reactor, test)
},
},
}
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls is enforced to prevent endless loops.
func TestDeleteMultiSync(t *testing.T) {
tests := []controllerTest{
{
// delete failure - delete returns error. The controller should
// try again.
"9-1 - delete returns error",
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classEmpty),
novolumes,
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
}

File diff suppressed because it is too large


@ -1,374 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"sort"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes
// indexed by AccessModes and ordered by storage capacity.
type persistentVolumeOrderedIndex struct {
store cache.Indexer
}
func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex {
return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})}
}
// accessModesIndexFunc is an indexing function that returns a persistent
// volume's AccessModes as a string
func accessModesIndexFunc(obj interface{}) ([]string, error) {
if pv, ok := obj.(*v1.PersistentVolume); ok {
modes := v1helper.GetAccessModesAsString(pv.Spec.AccessModes)
return []string{modes}, nil
}
return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj)
}
// listByAccessModes returns all volumes with the given set of
// AccessModeTypes. The list is unsorted!
func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []v1.PersistentVolumeAccessMode) ([]*v1.PersistentVolume, error) {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
AccessModes: modes,
},
}
objs, err := pvIndex.store.Index("accessmodes", pv)
if err != nil {
return nil, err
}
volumes := make([]*v1.PersistentVolume, len(objs))
for i, obj := range objs {
volumes[i] = obj.(*v1.PersistentVolume)
}
return volumes, nil
}
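// --- Illustrative aside (not part of the original file) ---
// A hedged, standalone sketch of the same idea used above: index PersistentVolumes
// in a client-go cache.Indexer under a key built from their stringified access
// modes, then query the index with a "probe" object, as listByAccessModes above
// does. modesKey is a local stand-in for v1helper.GetAccessModesAsString (an
// assumption, not the real helper).
package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// modesKey joins access modes into a single index key, e.g. "ReadWriteOnce,ReadOnlyMany".
func modesKey(modes []v1.PersistentVolumeAccessMode) string {
	parts := make([]string, 0, len(modes))
	for _, m := range modes {
		parts = append(parts, string(m))
	}
	return strings.Join(parts, ",")
}

func main() {
	idx := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"accessmodes": func(obj interface{}) ([]string, error) {
			pv := obj.(*v1.PersistentVolume)
			return []string{modesKey(pv.Spec.AccessModes)}, nil
		},
	})

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-a"},
		Spec:       v1.PersistentVolumeSpec{AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}},
	}
	if err := idx.Add(pv); err != nil {
		panic(err)
	}

	// Query by constructing a probe object carrying only the desired modes.
	probe := &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}}}
	objs, err := idx.Index("accessmodes", probe)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d volume(s) indexed under %q\n", len(objs), modesKey(probe.Spec.AccessModes))
}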
// find returns the nearest PV from the ordered list or nil if a match is not found
func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVolumeClaim, delayBinding bool) (*v1.PersistentVolume, error) {
// PVs are indexed by their access modes to allow easier searching. Each
// index is the string representation of a set of access modes. There is a
// finite number of possible sets and PVs will only be indexed in one of
// them (whichever index matches the PV's modes).
//
// A request for resources will always specify its desired access modes.
// Any matching PV must have at least that number of access modes, but it
// can have more. For example, a user asks for ReadWriteOnce but a GCEPD
// is available, which is ReadWriteOnce+ReadOnlyMany.
//
// Searches are performed against a set of access modes, so we can attempt
// not only the exact matching modes but also potential matches (the GCEPD
// example above).
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
for _, modes := range allPossibleModes {
volumes, err := pvIndex.listByAccessModes(modes)
if err != nil {
return nil, err
}
bestVol, err := findMatchingVolume(claim, volumes, nil /* node for topology binding*/, nil /* exclusion map */, delayBinding)
if err != nil {
return nil, err
}
if bestVol != nil {
return bestVol, nil
}
}
return nil, nil
}
// findMatchingVolume goes through the list of volumes to find the best matching volume
// for the claim.
//
// This function is used by both the PV controller and scheduler.
//
// delayBinding is true only in the PV controller path. When set, prebound PVs are still returned
// as a match for the claim, but unbound PVs are skipped.
//
// node is set only in the scheduler path. When set, the PV node affinity is checked against
// the node's labels.
//
// excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple
// unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen
// PV needs to be excluded from future matching.
func findMatchingVolume(
claim *v1.PersistentVolumeClaim,
volumes []*v1.PersistentVolume,
node *v1.Node,
excludedVolumes map[string]*v1.PersistentVolume,
delayBinding bool) (*v1.PersistentVolume, error) {
var smallestVolume *v1.PersistentVolume
var smallestVolumeQty resource.Quantity
requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestedClass := v1helper.GetPersistentVolumeClaimClass(claim)
var selector labels.Selector
if claim.Spec.Selector != nil {
internalSelector, err := metav1.LabelSelectorAsSelector(claim.Spec.Selector)
if err != nil {
// should be unreachable code due to validation
return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err)
}
selector = internalSelector
}
// Go through all available volumes with two goals:
// - find a volume that is either pre-bound by user or dynamically
// provisioned for this claim. Because of this we need to loop through
// all volumes.
// - find the smallest matching one if there is no volume pre-bound to
// the claim.
for _, volume := range volumes {
if _, ok := excludedVolumes[volume.Name]; ok {
// Skip volumes in the excluded list
continue
}
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
// check if volumeModes do not match (Alpha and feature gate protected)
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
if err != nil {
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
}
// filter out mismatching volumeModes
if isMisMatch {
continue
}
// check if PV's DeletionTimestamp is set; if so, skip this volume.
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
if volume.ObjectMeta.DeletionTimestamp != nil {
continue
}
}
nodeAffinityValid := true
if node != nil {
// Scheduler path, check that the PV NodeAffinity
// is satisfied by the node
err := volumeutil.CheckNodeAffinity(volume, node.Labels)
if err != nil {
nodeAffinityValid = false
}
}
if isVolumeBoundToClaim(volume, claim) {
// this claim and volume are pre-bound; return
// the volume if the size request is satisfied,
// otherwise continue searching for a match
if volumeQty.Cmp(requestedQty) < 0 {
continue
}
// If PV node affinity is invalid, return no match.
// This means the prebound PV (and therefore PVC)
// is not suitable for this node.
if !nodeAffinityValid {
return nil, nil
}
return volume, nil
}
if node == nil && delayBinding {
// PV controller does not bind this claim.
// Scheduler will handle binding unbound volumes
// Scheduler path will have node != nil
continue
}
// filter out:
// - volumes bound to another claim
// - volumes whose labels don't match the claim's selector, if specified
// - volumes in Class that is not requested
// - volumes whose NodeAffinity does not match the node
if volume.Spec.ClaimRef != nil {
continue
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
continue
}
if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
continue
}
if !nodeAffinityValid {
continue
}
if node != nil {
// Scheduler path
// Check that the access modes match
if !checkAccessModes(claim, volume) {
continue
}
}
if volumeQty.Cmp(requestedQty) >= 0 {
if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 {
smallestVolume = volume
smallestVolumeQty = volumeQty
}
}
}
if smallestVolume != nil {
// Found a matching volume
return smallestVolume, nil
}
return nil, nil
}
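// --- Illustrative aside (not part of the original file) ---
// A minimal sketch of the "smallest volume that still satisfies the request"
// selection performed above, using only resource.Quantity.Cmp. The capacities
// below are invented example values.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	requested := resource.MustParse("3Gi")
	capacities := []string{"1Gi", "5Gi", "10Gi"}

	var best *resource.Quantity
	for _, c := range capacities {
		q := resource.MustParse(c)
		if q.Cmp(requested) < 0 {
			continue // too small, like the volumeQty.Cmp(requestedQty) < 0 check above
		}
		if best == nil || best.Cmp(q) > 0 {
			candidate := q
			best = &candidate // keep the smallest capacity that still fits
		}
	}
	fmt.Println("smallest matching capacity:", best.String()) // prints "5Gi"
}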
// checkVolumeModeMisMatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims along with making sure that the Alpha feature gate BlockVolume is
// enabled.
// This is Alpha and could change in the future.
func checkVolumeModeMisMatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
if pvSpec.VolumeMode != nil && pvcSpec.VolumeMode != nil {
requestedVolumeMode := *pvcSpec.VolumeMode
pvVolumeMode := *pvSpec.VolumeMode
return requestedVolumeMode != pvVolumeMode, nil
} else {
// This also should return an error; it means that
// the defaulting has failed.
return true, fmt.Errorf("api defaulting for volumeMode failed")
}
} else {
// feature gate is disabled
return false, nil
}
}
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage
func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.PersistentVolumeClaim, delayBinding bool) (*v1.PersistentVolume, error) {
return pvIndex.findByClaim(claim, delayBinding)
}
// allPossibleMatchingAccessModes returns an array of AccessMode arrays that
// can satisfy a user's requested modes.
//
// see comments in the Find func above regarding indexing.
//
// allPossibleMatchingAccessModes gets all stringified accessmodes from the
// index and returns all those that contain at least all of the requested
// modes.
//
// For example, assume the index contains 2 types of PVs where the stringified
// accessmodes are:
//
// "RWO,ROX" -- some number of GCEPDs
// "RWO,ROX,RWX" -- some number of NFS volumes
//
// A request for RWO could be satisfied by both sets of indexed volumes, so
// allPossibleMatchingAccessModes returns:
//
// [][]v1.PersistentVolumeAccessMode {
// []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany,
// },
// []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
// },
// }
//
// A request for RWX can be satisfied by only one set of indexed volumes, so
// the return is:
//
// [][]v1.PersistentVolumeAccessMode {
// []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
// },
// }
//
// This func returns mode sets in ascending order of size, so the user gets
// the match closest to what they actually asked for.
func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []v1.PersistentVolumeAccessMode) [][]v1.PersistentVolumeAccessMode {
matchedModes := [][]v1.PersistentVolumeAccessMode{}
keys := pvIndex.store.ListIndexFuncValues("accessmodes")
for _, key := range keys {
indexedModes := v1helper.GetAccessModesFromString(key)
if volumeutil.AccessModesContainedInAll(indexedModes, requestedModes) {
matchedModes = append(matchedModes, indexedModes)
}
}
// sort by the number of modes in each array with the fewest number of
// modes coming first. this allows searching for volumes by the minimum
// number of modes required of the possible matches.
sort.Sort(byAccessModes{matchedModes})
return matchedModes
}
// byAccessModes is used to order access modes by size, with the fewest modes first
type byAccessModes struct {
modes [][]v1.PersistentVolumeAccessMode
}
func (c byAccessModes) Less(i, j int) bool {
return len(c.modes[i]) < len(c.modes[j])
}
func (c byAccessModes) Swap(i, j int) {
c.modes[i], c.modes[j] = c.modes[j], c.modes[i]
}
func (c byAccessModes) Len() int {
return len(c.modes)
}
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
func claimrefToClaimKey(claimref *v1.ObjectReference) string {
return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name)
}
// Returns true if PV satisfies all the PVC's requested AccessModes
func checkAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool {
pvModesMap := map[v1.PersistentVolumeAccessMode]bool{}
for _, mode := range volume.Spec.AccessModes {
pvModesMap[mode] = true
}
for _, mode := range claim.Spec.AccessModes {
_, ok := pvModesMap[mode]
if !ok {
return false
}
}
return true
}

File diff suppressed because it is too large


@ -1,30 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,184 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/api/core/v1"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
)
const (
// Subsystem names.
pvControllerSubsystem = "pv_collector"
// Metric names.
boundPVKey = "bound_pv_count"
unboundPVKey = "unbound_pv_count"
boundPVCKey = "bound_pvc_count"
unboundPVCKey = "unbound_pvc_count"
// Label names.
namespaceLabel = "namespace"
storageClassLabel = "storage_class"
)
var registerMetrics sync.Once
// PVLister used to list persistent volumes.
type PVLister interface {
List() []interface{}
}
// PVCLister used to list persistent volume claims.
type PVCLister interface {
List() []interface{}
}
// Register all metrics for pv controller.
func Register(pvLister PVLister, pvcLister PVCLister) {
registerMetrics.Do(func() {
prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
})
}
func newPVAndPVCCountCollector(pvLister PVLister, pvcLister PVCLister) *pvAndPVCCountCollector {
return &pvAndPVCCountCollector{pvLister, pvcLister}
}
// Custom collector for current PV and PVC counts.
type pvAndPVCCountCollector struct {
// Cache for accessing information about PersistentVolumes.
pvLister PVLister
// Cache for accessing information about PersistentVolumeClaims.
pvcLister PVCLister
}
var (
boundPVCountDesc = prometheus.NewDesc(
prometheus.BuildFQName("", pvControllerSubsystem, boundPVKey),
"Gauge measuring number of persistent volume currently bound",
[]string{storageClassLabel}, nil)
unboundPVCountDesc = prometheus.NewDesc(
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVKey),
"Gauge measuring number of persistent volume currently unbound",
[]string{storageClassLabel}, nil)
boundPVCCountDesc = prometheus.NewDesc(
prometheus.BuildFQName("", pvControllerSubsystem, boundPVCKey),
"Gauge measuring number of persistent volume claim currently bound",
[]string{namespaceLabel}, nil)
unboundPVCCountDesc = prometheus.NewDesc(
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
"Gauge measuring number of persistent volume claim currently unbound",
[]string{namespaceLabel}, nil)
)
func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {
ch <- boundPVCountDesc
ch <- unboundPVCountDesc
ch <- boundPVCCountDesc
ch <- unboundPVCCountDesc
}
func (collector *pvAndPVCCountCollector) Collect(ch chan<- prometheus.Metric) {
collector.pvCollect(ch)
collector.pvcCollect(ch)
}
func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric) {
boundNumberByStorageClass := make(map[string]int)
unboundNumberByStorageClass := make(map[string]int)
for _, pvObj := range collector.pvLister.List() {
pv, ok := pvObj.(*v1.PersistentVolume)
if !ok {
continue
}
if pv.Status.Phase == v1.VolumeBound {
boundNumberByStorageClass[pv.Spec.StorageClassName]++
} else {
unboundNumberByStorageClass[pv.Spec.StorageClassName]++
}
}
for storageClassName, number := range boundNumberByStorageClass {
metric, err := prometheus.NewConstMetric(
boundPVCountDesc,
prometheus.GaugeValue,
float64(number),
storageClassName)
if err != nil {
glog.Warningf("Create bound pv number metric failed: %v", err)
continue
}
ch <- metric
}
for storageClassName, number := range unboundNumberByStorageClass {
metric, err := prometheus.NewConstMetric(
unboundPVCountDesc,
prometheus.GaugeValue,
float64(number),
storageClassName)
if err != nil {
glog.Warningf("Create unbound pv number metric failed: %v", err)
continue
}
ch <- metric
}
}
func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric) {
boundNumberByNamespace := make(map[string]int)
unboundNumberByNamespace := make(map[string]int)
for _, pvcObj := range collector.pvcLister.List() {
pvc, ok := pvcObj.(*v1.PersistentVolumeClaim)
if !ok {
continue
}
if pvc.Status.Phase == v1.ClaimBound {
boundNumberByNamespace[pvc.Namespace]++
} else {
unboundNumberByNamespace[pvc.Namespace]++
}
}
for namespace, number := range boundNumberByNamespace {
metric, err := prometheus.NewConstMetric(
boundPVCCountDesc,
prometheus.GaugeValue,
float64(number),
namespace)
if err != nil {
glog.Warningf("Create bound pvc number metric failed: %v", err)
continue
}
ch <- metric
}
for namespace, number := range unboundNumberByNamespace {
metric, err := prometheus.NewConstMetric(
unboundPVCCountDesc,
prometheus.GaugeValue,
float64(number),
namespace)
if err != nil {
glog.Warningf("Create unbound pvc number metric failed: %v", err)
continue
}
ch <- metric
}
}
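// --- Illustrative aside (not part of the original file) ---
// A hedged usage sketch: register the collector above and expose it over HTTP.
// fixedLister is an invented stand-in that satisfies the PVLister/PVCLister
// interfaces; a real caller passes listers backed by the controller's caches.
// Register and the import path come from this package's source and BUILD file.
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
)

// fixedLister returns a canned slice of objects.
type fixedLister struct{ objs []interface{} }

func (l fixedLister) List() []interface{} { return l.objs }

func main() {
	// Registers the pv_collector_* gauges with the default Prometheus registry (once).
	metrics.Register(fixedLister{}, fixedLister{})

	// The default promhttp handler gathers from the default registry, so the
	// collector's Describe/Collect methods run on every scrape of /metrics.
	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}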


@ -1,26 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["options.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options",
deps = ["//vendor/github.com/spf13/pflag:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,91 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"time"
"github.com/spf13/pflag"
)
// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
type VolumeConfigFlags struct {
PersistentVolumeRecyclerMaximumRetry int
PersistentVolumeRecyclerMinimumTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathNFS string
PersistentVolumeRecyclerIncrementTimeoutNFS int
PersistentVolumeRecyclerPodTemplateFilePathHostPath string
PersistentVolumeRecyclerMinimumTimeoutHostPath int
PersistentVolumeRecyclerIncrementTimeoutHostPath int
EnableHostPathProvisioning bool
EnableDynamicProvisioning bool
}
type PersistentVolumeControllerOptions struct {
PVClaimBinderSyncPeriod time.Duration
VolumeConfigFlags VolumeConfigFlags
}
func NewPersistentVolumeControllerOptions() PersistentVolumeControllerOptions {
return PersistentVolumeControllerOptions{
PVClaimBinderSyncPeriod: 15 * time.Second,
VolumeConfigFlags: VolumeConfigFlags{
// default values here
PersistentVolumeRecyclerMaximumRetry: 3,
PersistentVolumeRecyclerMinimumTimeoutNFS: 300,
PersistentVolumeRecyclerIncrementTimeoutNFS: 30,
PersistentVolumeRecyclerMinimumTimeoutHostPath: 60,
PersistentVolumeRecyclerIncrementTimeoutHostPath: 30,
EnableHostPathProvisioning: false,
EnableDynamicProvisioning: true,
},
}
}
func (o *PersistentVolumeControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&o.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", o.PVClaimBinderSyncPeriod,
"The period for syncing persistent volumes and persistent volume claims")
fs.StringVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS,
"pv-recycler-pod-template-filepath-nfs", o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS,
"The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs",
o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs",
o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath",
o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath,
"The file path to a pod definition used as a template for HostPath persistent volume recycling. "+
"This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath",
o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath,
"The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath",
o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath,
"the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. "+
"This is for development and testing only and will not work in a multi-node cluster.")
fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry, "pv-recycler-maximum-retry",
o.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
"Maximum number of attempts to recycle or delete a persistent volume")
fs.BoolVar(&o.VolumeConfigFlags.EnableHostPathProvisioning, "enable-hostpath-provisioner", o.VolumeConfigFlags.EnableHostPathProvisioning,
"Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. "+
"HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.BoolVar(&o.VolumeConfigFlags.EnableDynamicProvisioning, "enable-dynamic-provisioning", o.VolumeConfigFlags.EnableDynamicProvisioning,
"Enable dynamic provisioning for environments that support it.")
}
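// --- Illustrative aside (not part of the original file) ---
// A hedged usage sketch of these options: bind them to a pflag.FlagSet and parse
// CLI-style arguments. The flag values below are invented examples; the flag
// names and import path come from this file and its BUILD rule.
package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
)

func main() {
	opts := options.NewPersistentVolumeControllerOptions() // starts from the defaults above
	fs := pflag.NewFlagSet("pv-controller", pflag.ContinueOnError)
	opts.AddFlags(fs)

	// Simulate command-line arguments overriding two of the defaults.
	if err := fs.Parse([]string{
		"--pvclaimbinder-sync-period=30s",
		"--pv-recycler-maximum-retry=5",
	}); err != nil {
		panic(err)
	}

	fmt.Println(opts.PVClaimBinderSyncPeriod)                                // 30s
	fmt.Println(opts.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry) // 5
}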


@ -1,462 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"
)
var class1Parameters = map[string]string{
"param1": "value1",
}
var class2Parameters = map[string]string{
"param2": "value2",
}
var deleteReclaimPolicy = v1.PersistentVolumeReclaimDelete
var storageClasses = []*storage.StorageClass{
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "gold",
},
Provisioner: mockPluginName,
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "silver",
},
Provisioner: mockPluginName,
Parameters: class2Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "external",
},
Provisioner: "vendor.com/my-volume",
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "unknown-internal",
},
Provisioner: "kubernetes.io/unknown",
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
},
{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "unsupported-mountoptions",
},
Provisioner: mockPluginName,
Parameters: class1Parameters,
ReclaimPolicy: &deleteReclaimPolicy,
MountOptions: []string{"foo"},
},
}
// call to storageClass 1, returning an error
var provision1Error = provisionCall{
ret: errors.New("Mock provisioner error"),
expectedParameters: class1Parameters,
}
// call to storageClass 1, returning a valid PV
var provision1Success = provisionCall{
ret: nil,
expectedParameters: class1Parameters,
}
// call to storageClass 2, returning a valid PV
var provision2Success = provisionCall{
ret: nil,
expectedParameters: class2Parameters,
}
var provisionAlphaSuccess = provisionCall{
ret: nil,
}
// Test single call to syncVolume, expecting provisioning to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestProvisionSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume (with a default class)
"11-1 - successful provision with storage class 1",
novolumes,
newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-1", "uid11-1", "1Gi", "", v1.ClaimPending, &classGold),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-1", "uid11-1", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Normal ProvisioningSucceeded"}, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision failure - plugin not found
"11-2 - plugin not found",
novolumes,
novolumes,
newClaimArray("claim11-2", "uid11-2", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-2", "uid11-2", "1Gi", "", v1.ClaimPending, &classGold),
[]string{"Warning ProvisioningFailed"}, noerrors,
testSyncClaim,
},
{
// Provision failure - newProvisioner returns error
"11-3 - newProvisioner failure",
novolumes,
novolumes,
newClaimArray("claim11-3", "uid11-3", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-3", "uid11-3", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"}, noerrors,
wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// Provision failure - Provision returns error
"11-4 - provision failure",
novolumes,
novolumes,
newClaimArray("claim11-4", "uid11-4", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-4", "uid11-4", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"}, noerrors,
wrapTestWithProvisionCalls([]provisionCall{provision1Error}, testSyncClaim),
},
{
// No provisioning if there is a matching volume available
"11-6 - provisioning when there is a volume available",
newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold),
newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController),
newClaimArray("claim11-6", "uid11-6", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted),
noevents, noerrors,
// No provisioning plugin configured - makes the test fail when
// the controller erroneously tries to provision something
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision success? - claim is bound before provisioner creates
// a volume.
"11-7 - claim is bound before provisioning",
novolumes,
newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, &classGold),
// The claim would be bound in next syncClaim
newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Create a volume before provisionClaimOperation starts.
// This simulates a parallel controller provisioning the volume.
reactor.lock.Lock()
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, annBoundByController, annDynamicallyProvisioned)
reactor.volumes[volume.Name] = volume
reactor.lock.Unlock()
}),
},
{
// Provision success - cannot save provisioned PV once,
// second retry succeeds
"11-8 - cannot save provisioned volume",
novolumes,
newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-8", "uid11-8", "1Gi", "", v1.ClaimPending, &classGold),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-8", "uid11-8", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Normal ProvisioningSucceeded"},
[]reactorError{
// Inject error to the first
// kubeclient.PersistentVolumes.Create() call. All other calls
// will succeed.
{"create", "persistentvolumes", errors.New("Mock creation error")},
},
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision success? - cannot save provisioned PV five times,
// volume is deleted and delete succeeds
"11-9 - cannot save provisioned volume, delete succeeds",
novolumes,
novolumes,
newClaimArray("claim11-9", "uid11-9", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-9", "uid11-9", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{nil}, // delete calls
[]provisionCall{provision1Success}, // provision calls
testSyncClaim,
),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete failed - no plugin found
"11-10 - cannot save provisioned volume, no delete plugin found",
novolumes,
novolumes,
newClaimArray("claim11-10", "uid11-10", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-10", "uid11-10", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
// No deleteCalls are configured, which results into no deleter plugin available for the volume
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete failed - deleter returns error five times
"11-11 - cannot save provisioned volume, deleter fails",
novolumes,
novolumes,
newClaimArray("claim11-11", "uid11-11", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-11", "uid11-11", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{ // delete calls
errors.New("Mock deletion error1"),
errors.New("Mock deletion error2"),
errors.New("Mock deletion error3"),
errors.New("Mock deletion error4"),
errors.New("Mock deletion error5"),
},
[]provisionCall{provision1Success}, // provision calls
testSyncClaim),
},
{
// Provision failure - cannot save provisioned PV five times,
// volume delete succeeds 2nd time
"11-12 - cannot save provisioned volume, delete succeeds 2nd time",
novolumes,
novolumes,
newClaimArray("claim11-12", "uid11-12", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-12", "uid11-12", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
[]string{"Warning ProvisioningFailed"},
[]reactorError{
// Inject error to five kubeclient.PersistentVolumes.Create()
// calls
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", errors.New("Mock creation error2")},
{"create", "persistentvolumes", errors.New("Mock creation error3")},
{"create", "persistentvolumes", errors.New("Mock creation error4")},
{"create", "persistentvolumes", errors.New("Mock creation error5")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
[]error{ // delete calls
errors.New("Mock deletion error1"),
nil,
},
[]provisionCall{provision1Success}, // provision calls
testSyncClaim,
),
},
{
// Provision a volume (with non-default class)
"11-13 - successful provision with storage class 2",
novolumes,
newVolumeArray("pvc-uid11-13", "1Gi", "uid11-13", "claim11-13", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classSilver, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim11-13", "uid11-13", "1Gi", "", v1.ClaimPending, &classSilver),
// Binding will be completed in the next syncClaim
newClaimArray("claim11-13", "uid11-13", "1Gi", "", v1.ClaimPending, &classSilver, annStorageProvisioner),
[]string{"Normal ProvisioningSucceeded"}, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision2Success}, testSyncClaim),
},
{
// Provision error - non existing class
"11-14 - fail due to non-existing class",
novolumes,
novolumes,
newClaimArray("claim11-14", "uid11-14", "1Gi", "", v1.ClaimPending, &classNonExisting),
newClaimArray("claim11-14", "uid11-14", "1Gi", "", v1.ClaimPending, &classNonExisting),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning with class=""
"11-15 - no provisioning with class=''",
novolumes,
novolumes,
newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending, &classEmpty),
newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending, &classEmpty),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning with class=nil
"11-16 - no provisioning with class=nil",
novolumes,
novolumes,
newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending, nil),
newClaimArray("claim11-15", "uid11-15", "1Gi", "", v1.ClaimPending, nil),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning + normal event with external provisioner
"11-17 - external provisioner",
novolumes,
novolumes,
newClaimArray("claim11-17", "uid11-17", "1Gi", "", v1.ClaimPending, &classExternal),
claimWithAnnotation(annStorageProvisioner, "vendor.com/my-volume",
newClaimArray("claim11-17", "uid11-17", "1Gi", "", v1.ClaimPending, &classExternal)),
[]string{"Normal ExternalProvisioning"},
noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// No provisioning + warning event with unknown internal provisioner
"11-18 - unknown internal provisioner",
novolumes,
novolumes,
newClaimArray("claim11-18", "uid11-18", "1Gi", "", v1.ClaimPending, &classUnknownInternal),
newClaimArray("claim11-18", "uid11-18", "1Gi", "", v1.ClaimPending, &classUnknownInternal),
[]string{"Warning ProvisioningFailed"},
noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
{
// Provision success - first save of a PV to API server fails (API
// server has written the object to etcd, but crashed before sending
// 200 OK response to the controller). Controller retries and the
// second save of the PV returns "AlreadyExists" because the PV
// object already is in the API server.
//
"11-19 - provisioned volume saved but API server crashed",
novolumes,
// We don't actually simulate API server saving the object and
// crashing afterwards, Create() just returns error without saving
// the volume in this test. So the set of expected volumes at the
// end of the test is empty.
novolumes,
newClaimArray("claim11-19", "uid11-19", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim11-19", "uid11-19", "1Gi", "", v1.ClaimPending, &classGold, annStorageProvisioner),
noevents,
[]reactorError{
// Inject errors to simulate crashed API server during
// kubeclient.PersistentVolumes.Create()
{"create", "persistentvolumes", errors.New("Mock creation error1")},
{"create", "persistentvolumes", apierrs.NewAlreadyExists(api.Resource("persistentvolumes"), "")},
},
wrapTestWithPluginCalls(
nil, // recycle calls
nil, // delete calls - if Delete was called the test would fail
[]provisionCall{provision1Success},
testSyncClaim,
),
},
{
// No provisioning + warning event with unsupported storageClass.mountOptions
"11-20 - unsupported storageClass.mountOptions",
novolumes,
novolumes,
newClaimArray("claim11-20", "uid11-20", "1Gi", "", v1.ClaimPending, &classUnsupportedMountOptions),
newClaimArray("claim11-20", "uid11-20", "1Gi", "", v1.ClaimPending, &classUnsupportedMountOptions, annStorageProvisioner),
// Expect event to be prefixed with "Mount options" because saving PV will fail anyway
[]string{"Warning ProvisioningFailed Mount options"},
noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
}
runSyncTests(t, tests, storageClasses, []*v1.Pod{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls is enforced to prevent endless loops.
func TestProvisionMultiSync(t *testing.T) {
tests := []controllerTest{
{
// Provision a volume with binding
"12-1 - successful provision",
novolumes,
newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, annBoundByController, annDynamicallyProvisioned),
newClaimArray("claim12-1", "uid12-1", "1Gi", "", v1.ClaimPending, &classGold),
newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted, annStorageProvisioner),
noevents, noerrors, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
}
runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name)
}
// When provisioning is disabled, provisioning a claim should instantly return nil
func TestDisablingDynamicProvisioner(t *testing.T) {
ctrl, err := newTestController(nil, nil, false)
if err != nil {
t.Fatalf("Construct PersistentVolume controller failed: %v", err)
}
retVal := ctrl.provisionClaim(nil)
if retVal != nil {
t.Errorf("Expected nil return but got %v", retVal)
}
}

File diff suppressed because it is too large


@ -1,540 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
"k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// This file contains the controller base functionality, i.e. framework to
// process PV/PVC added/updated/deleted events. The real binding, provisioning,
// recycling and deleting are done in pv_controller.go
// ControllerParameters contains arguments for creation of a new
// PersistentVolume controller.
type ControllerParameters struct {
KubeClient clientset.Interface
SyncPeriod time.Duration
VolumePlugins []vol.VolumePlugin
Cloud cloudprovider.Interface
ClusterName string
VolumeInformer coreinformers.PersistentVolumeInformer
ClaimInformer coreinformers.PersistentVolumeClaimInformer
ClassInformer storageinformers.StorageClassInformer
PodInformer coreinformers.PodInformer
NodeInformer coreinformers.NodeInformer
EventRecorder record.EventRecorder
EnableDynamicProvisioning bool
}
// NewController creates a new PersistentVolume controller
func NewController(p ControllerParameters) (*PersistentVolumeController, error) {
eventRecorder := p.EventRecorder
if eventRecorder == nil {
broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")})
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
}
controller := &PersistentVolumeController{
volumes: newPersistentVolumeOrderedIndex(),
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
kubeClient: p.KubeClient,
eventRecorder: eventRecorder,
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
cloud: p.Cloud,
enableDynamicProvisioning: p.EnableDynamicProvisioning,
clusterName: p.ClusterName,
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
createProvisionedPVInterval: createProvisionedPVInterval,
claimQueue: workqueue.NewNamed("claims"),
volumeQueue: workqueue.NewNamed("volumes"),
resyncPeriod: p.SyncPeriod,
}
// Prober is nil because PV is not aware of Flexvolume.
if err := controller.volumePluginMgr.InitPlugins(p.VolumePlugins, nil /* prober */, controller); err != nil {
return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolume Controller: %v", err)
}
p.VolumeInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
},
)
controller.volumeLister = p.VolumeInformer.Lister()
controller.volumeListerSynced = p.VolumeInformer.Informer().HasSynced
p.ClaimInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
},
)
controller.claimLister = p.ClaimInformer.Lister()
controller.claimListerSynced = p.ClaimInformer.Informer().HasSynced
controller.classLister = p.ClassInformer.Lister()
controller.classListerSynced = p.ClassInformer.Informer().HasSynced
controller.podLister = p.PodInformer.Lister()
controller.podListerSynced = p.PodInformer.Informer().HasSynced
controller.NodeLister = p.NodeInformer.Lister()
controller.NodeListerSynced = p.NodeInformer.Informer().HasSynced
return controller, nil
}
// initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when the first addClaim/addVolume
// calls arrive and perform the initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
volumeList, err := volumeLister.List(labels.Everything())
if err != nil {
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
return
}
for _, volume := range volumeList {
volumeClone := volume.DeepCopy()
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
glog.Errorf("error updating volume cache: %v", err)
}
}
claimList, err := claimLister.List(labels.Everything())
if err != nil {
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
return
}
for _, claim := range claimList {
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
glog.Errorf("error updating claim cache: %v", err)
}
}
glog.V(4).Infof("controller initialized")
}
// enqueueWork adds volume or claim to given work queue.
func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, obj interface{}) {
// Beware of "xxx deleted" events
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
objName, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("failed to get key from object: %v", err)
return
}
glog.V(5).Infof("enqueued %q for sync", objName)
queue.Add(objName)
}
func (ctrl *PersistentVolumeController) storeVolumeUpdate(volume interface{}) (bool, error) {
return storeObjectUpdate(ctrl.volumes.store, volume, "volume")
}
func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (bool, error) {
return storeObjectUpdate(ctrl.claims, claim, "claim")
}
// updateVolume runs in worker thread and handles "volume added",
// "volume updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume) {
// Store the new volume version in the cache and do not process it if this
// is an old version.
new, err := ctrl.storeVolumeUpdate(volume)
if err != nil {
glog.Errorf("%v", err)
}
if !new {
return
}
err = ctrl.syncVolume(volume)
if err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
} else {
glog.Errorf("could not sync volume %q: %+v", volume.Name, err)
}
}
}
// deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) {
_ = ctrl.volumes.store.Delete(volume)
glog.V(4).Infof("volume %q deleted", volume.Name)
if volume.Spec.ClaimRef == nil {
return
}
// sync the claim when its volume is deleted. Explicitly syncing the
// claim here in response to volume deletion prevents the claim from
// waiting until the next sync period for its Lost status.
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
glog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
ctrl.claimQueue.Add(claimKey)
}
// updateClaim runs in worker thread and handles "claim added",
// "claim updated" and "periodic sync" events.
func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeClaim) {
// Store the new claim version in the cache and do not process it if this is
// an old version.
new, err := ctrl.storeClaimUpdate(claim)
if err != nil {
glog.Errorf("%v", err)
}
if !new {
return
}
err = ctrl.syncClaim(claim)
if err != nil {
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
glog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
} else {
glog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
}
}
}
// deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) {
_ = ctrl.claims.Delete(claim)
glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
volumeName := claim.Spec.VolumeName
if volumeName == "" {
glog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
return
}
// sync the volume when its claim is deleted. Explicitly syncing the
// volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release.
glog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
ctrl.volumeQueue.Add(volumeName)
}
// Run starts all of this controller's control loops
func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer ctrl.claimQueue.ShutDown()
defer ctrl.volumeQueue.ShutDown()
glog.Infof("Starting persistent volume controller")
defer glog.Infof("Shutting down persistent volume controller")
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
return
}
ctrl.initializeCaches(ctrl.volumeLister, ctrl.claimLister)
go wait.Until(ctrl.resync, ctrl.resyncPeriod, stopCh)
go wait.Until(ctrl.volumeWorker, time.Second, stopCh)
go wait.Until(ctrl.claimWorker, time.Second, stopCh)
metrics.Register(ctrl.volumes.store, ctrl.claims)
<-stopCh
}
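// Illustrative sketch, not part of the original file: one plausible way to wire
// NewController and Run together. The helper name runPVController, the
// 15-minute resync period and the caller-supplied informers are assumptions
// made for this example; volume plugin, cloud provider and event recorder
// setup are intentionally omitted.
func runPVController(
	client clientset.Interface,
	volumes coreinformers.PersistentVolumeInformer,
	claims coreinformers.PersistentVolumeClaimInformer,
	classes storageinformers.StorageClassInformer,
	pods coreinformers.PodInformer,
	nodes coreinformers.NodeInformer,
	stopCh <-chan struct{},
) error {
	ctrl, err := NewController(ControllerParameters{
		KubeClient:                client,
		SyncPeriod:                15 * time.Minute, // hypothetical resync period
		VolumeInformer:            volumes,
		ClaimInformer:             claims,
		ClassInformer:             classes,
		PodInformer:               pods,
		NodeInformer:              nodes,
		EnableDynamicProvisioning: true,
	})
	if err != nil {
		return err
	}
	// Run blocks until stopCh is closed, so start it in its own goroutine.
	go ctrl.Run(stopCh)
	return nil
}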
// volumeWorker processes items from volumeQueue. It must run only once;
// syncVolume is not assured to be reentrant.
func (ctrl *PersistentVolumeController) volumeWorker() {
workFunc := func() bool {
keyObj, quit := ctrl.volumeQueue.Get()
if quit {
return true
}
defer ctrl.volumeQueue.Done(keyObj)
key := keyObj.(string)
glog.V(5).Infof("volumeWorker[%s]", key)
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
return false
}
volume, err := ctrl.volumeLister.Get(name)
if err == nil {
// The volume still exists in informer cache, the event must have
// been add/update/sync
ctrl.updateVolume(volume)
return false
}
if !errors.IsNotFound(err) {
glog.V(2).Infof("error getting volume %q from informer: %v", key, err)
return false
}
// The volume is not in informer cache, the event must have been
// "delete"
volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
if err != nil {
glog.V(2).Infof("error getting volume %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the volume from its cache
glog.V(2).Infof("deletion of volume %q was already processed", key)
return false
}
volume, ok := volumeObj.(*v1.PersistentVolume)
if !ok {
glog.Errorf("expected volume, got %+v", volumeObj)
return false
}
ctrl.deleteVolume(volume)
return false
}
for {
if quit := workFunc(); quit {
glog.Infof("volume worker queue shutting down")
return
}
}
}
// claimWorker processes items from claimQueue. It must run only once;
// syncClaim is not reentrant.
func (ctrl *PersistentVolumeController) claimWorker() {
workFunc := func() bool {
keyObj, quit := ctrl.claimQueue.Get()
if quit {
return true
}
defer ctrl.claimQueue.Done(keyObj)
key := keyObj.(string)
glog.V(5).Infof("claimWorker[%s]", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
return false
}
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
if err == nil {
// The claim still exists in informer cache, the event must have
// been add/update/sync
ctrl.updateClaim(claim)
return false
}
if !errors.IsNotFound(err) {
glog.V(2).Infof("error getting claim %q from informer: %v", key, err)
return false
}
// The claim is not in informer cache, the event must have been "delete"
claimObj, found, err := ctrl.claims.GetByKey(key)
if err != nil {
glog.V(2).Infof("error getting claim %q from cache: %v", key, err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the claim from its cache
glog.V(2).Infof("deletion of claim %q was already processed", key)
return false
}
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
if !ok {
glog.Errorf("expected claim, got %+v", claimObj)
return false
}
ctrl.deleteClaim(claim)
return false
}
for {
if quit := workFunc(); quit {
glog.Infof("claim worker queue shutting down")
return
}
}
}
// resync supplements short resync period of shared informers - we don't want
// all consumers of PV/PVC shared informer to have a short resync period,
// therefore we do our own.
func (ctrl *PersistentVolumeController) resync() {
glog.V(4).Infof("resyncing PV controller")
pvcs, err := ctrl.claimLister.List(labels.NewSelector())
if err != nil {
glog.Warningf("cannot list claims: %s", err)
return
}
for _, pvc := range pvcs {
ctrl.enqueueWork(ctrl.claimQueue, pvc)
}
pvs, err := ctrl.volumeLister.List(labels.NewSelector())
if err != nil {
glog.Warningf("cannot list persistent volumes: %s", err)
return
}
for _, pv := range pvs {
ctrl.enqueueWork(ctrl.volumeQueue, pv)
}
}
// setClaimProvisioner saves
// claim.Annotations[annStorageProvisioner] = class.Provisioner
func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolumeClaim, error) {
if val, ok := claim.Annotations[annStorageProvisioner]; ok && val == class.Provisioner {
// annotation is already set, nothing to do
return claim, nil
}
// The claim from the method args can be pointing to the watcher cache. We must
// not modify it, therefore create a copy.
claimClone := claim.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, class.Provisioner)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
if err != nil {
return newClaim, err
}
_, err = ctrl.storeClaimUpdate(newClaim)
if err != nil {
return newClaim, err
}
return newClaim, nil
}
// Stateless functions
func getClaimStatusForLogging(claim *v1.PersistentVolumeClaim) string {
bound := metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted)
boundByController := metav1.HasAnnotation(claim.ObjectMeta, annBoundByController)
return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
}
func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
boundByController := metav1.HasAnnotation(volume.ObjectMeta, annBoundByController)
claimName := ""
if volume.Spec.ClaimRef != nil {
claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
}
return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
}
// isVolumeBoundToClaim returns true if the given volume is pre-bound or bound
// to the specific claim. Both claim.Name and claim.Namespace must be equal.
// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
func isVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool {
if volume.Spec.ClaimRef == nil {
return false
}
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
return false
}
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
return false
}
return true
}
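// Illustrative sketch, not part of the original file: the matching rule from
// isVolumeBoundToClaim applied to a pre-bound volume whose ClaimRef carries no
// UID. The function and object names used here are hypothetical.
func exampleIsVolumeBoundToClaim() bool {
	volume := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			// Pre-bound by an admin: name/namespace set, UID left empty.
			ClaimRef: &v1.ObjectReference{Name: "claim-a", Namespace: "ns-a"},
		},
	}
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "claim-a", Namespace: "ns-a", UID: "uid-a"},
	}
	// Name and namespace match, and an empty ClaimRef.UID matches any claim UID,
	// so this returns true.
	return isVolumeBoundToClaim(volume, claim)
}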
// storeObjectUpdate updates the given cache with a new object version from an
// Informer callback (i.e. with events from etcd) or with an object modified by
// the controller itself. Returns true if the cache was updated, false if the
// object is an old version and should be ignored.
func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) {
objName, err := controller.KeyFunc(obj)
if err != nil {
return false, fmt.Errorf("Couldn't get key for object %+v: %v", obj, err)
}
oldObj, found, err := store.Get(obj)
if err != nil {
return false, fmt.Errorf("Error finding %s %q in controller cache: %v", className, objName, err)
}
objAccessor, err := meta.Accessor(obj)
if err != nil {
return false, err
}
if !found {
// This is a new object
glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
if err = store.Add(obj); err != nil {
return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
}
return true, nil
}
oldObjAccessor, err := meta.Accessor(oldObj)
if err != nil {
return false, err
}
objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
if err != nil {
return false, fmt.Errorf("Error parsing ResourceVersion %q of %s %q: %s", objAccessor.GetResourceVersion(), className, objName, err)
}
oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64)
if err != nil {
return false, fmt.Errorf("Error parsing old ResourceVersion %q of %s %q: %s", oldObjAccessor.GetResourceVersion(), className, objName, err)
}
// Throw away only older version, let the same version pass - we do want to
// get periodic sync events.
if oldObjResourceVersion > objResourceVersion {
glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
return false, nil
}
glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
if err = store.Update(obj); err != nil {
return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
}
return true, nil
}
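// Illustrative sketch, not part of the original file: the ResourceVersion
// comparison rule used by storeObjectUpdate, isolated into a hypothetical
// helper. An equal version still replaces the cached object (so periodic sync
// events are processed); only a strictly older version is ignored.
func isSameOrNewerVersion(oldRV, newRV string) (bool, error) {
	oldVersion, err := strconv.ParseInt(oldRV, 10, 64)
	if err != nil {
		return false, fmt.Errorf("error parsing old ResourceVersion %q: %v", oldRV, err)
	}
	newVersion, err := strconv.ParseInt(newRV, 10, 64)
	if err != nil {
		return false, fmt.Errorf("error parsing new ResourceVersion %q: %v", newRV, err)
	}
	// The comparison is numeric: "10" is newer than "2" even though it sorts
	// lower as a string.
	return newVersion >= oldVersion, nil
}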

View File

@ -1,380 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"testing"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/controller"
)
// Test the real controller methods (add/update/delete claim/volume) with
// a fake API server.
// There is no controller API to 'initiate syncAll now', therefore these tests
// can't reliably simulate periodic sync of volumes/claims - it would be
// either very timing-sensitive or slow to wait for real periodic sync.
func TestControllerSync(t *testing.T) {
tests := []controllerTest{
// [Unit test set 5] - controller tests.
// We test the controller as if
// it were connected to a real API server, i.e. we call add/update/delete
// Claim/Volume methods. Also, all changes to volumes and claims are
// sent to add/update/delete Claim/Volume as real controller would do.
{
// addClaim gets a new claim. Check it's bound to a volume.
"5-2 - complete bind",
newVolumeArray("volume5-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume5-2", "1Gi", "uid5-2", "claim5-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
noclaims, /* added in testAddClaim5_2 */
newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noevents, noerrors,
// Custom test function that generates an add event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
claim := newClaim("claim5-2", "uid5-2", "1Gi", "", v1.ClaimPending, nil)
reactor.addClaimEvent(claim)
return nil
},
},
{
// deleteClaim with a bound claim makes bound volume released.
"5-3 - delete claim",
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
noclaims,
noevents, noerrors,
// Custom test function that generates a delete event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
obj := ctrl.claims.List()[0]
claim := obj.(*v1.PersistentVolumeClaim)
reactor.deleteClaimEvent(claim)
return nil
},
},
{
// deleteVolume with a bound volume. Check the claim is Lost.
"5-4 - delete volume",
newVolumeArray("volume5-4", "1Gi", "uid5-4", "claim5-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty),
novolumes,
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", v1.ClaimLost, nil, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors,
// Custom test function that generates a delete event
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
obj := ctrl.volumes.store.List()[0]
volume := obj.(*v1.PersistentVolume)
reactor.deleteVolumeEvent(volume)
return nil
},
},
}
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)
// Initialize the controller
client := &fake.Clientset{}
fakeVolumeWatch := watch.NewFake()
client.PrependWatchReactor("persistentvolumes", core.DefaultWatchReactor(fakeVolumeWatch, nil))
fakeClaimWatch := watch.NewFake()
client.PrependWatchReactor("persistentvolumeclaims", core.DefaultWatchReactor(fakeClaimWatch, nil))
client.PrependWatchReactor("storageclasses", core.DefaultWatchReactor(watch.NewFake(), nil))
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
ctrl, err := newTestController(client, informers, true)
if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
}
reactor := newVolumeReactor(client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors)
for _, claim := range test.initialClaims {
reactor.claims[claim.Name] = claim
go func(claim *v1.PersistentVolumeClaim) {
fakeClaimWatch.Add(claim)
}(claim)
}
for _, volume := range test.initialVolumes {
reactor.volumes[volume.Name] = volume
go func(volume *v1.PersistentVolume) {
fakeVolumeWatch.Add(volume)
}(volume)
}
// Start the controller
stopCh := make(chan struct{})
informers.Start(stopCh)
go ctrl.Run(stopCh)
// Wait for the controller to pass initial sync and fill its caches.
for !ctrl.volumeListerSynced() ||
!ctrl.claimListerSynced() ||
len(ctrl.claims.ListKeys()) < len(test.initialClaims) ||
len(ctrl.volumes.store.ListKeys()) < len(test.initialVolumes) {
time.Sleep(10 * time.Millisecond)
}
glog.V(4).Infof("controller synced, starting test")
// Call the tested function
err = test.test(ctrl, reactor, test)
if err != nil {
t.Errorf("Test %q initial test call failed: %v", test.name, err)
}
// Simulate a periodic resync, just in case some events arrived in a
// wrong order.
ctrl.resync()
err = reactor.waitTest(test)
if err != nil {
t.Errorf("Failed to run test %s: %v", test.name, err)
}
close(stopCh)
evaluateTestResults(ctrl, reactor, test, t)
}
}
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = version
ret, err := storeObjectUpdate(c, pv, "volume")
if err != nil {
t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
}
if expectedReturn != ret {
t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret)
}
// find the stored version
pvObj, found, err := c.GetByKey("pvName")
if err != nil {
t.Errorf("expected volume 'pvName' in the cache, got error instead: %v", err)
}
if !found {
t.Errorf("expected volume 'pvName' in the cache but it was not found")
}
pv, ok := pvObj.(*v1.PersistentVolume)
if !ok {
t.Errorf("expected volume in the cache, got different object instead: %#v", pvObj)
}
if ret {
if pv.ResourceVersion != version {
t.Errorf("expected volume with version %s in the cache, got %s instead", version, pv.ResourceVersion)
}
} else {
if pv.ResourceVersion == version {
t.Errorf("expected volume with version other than %s in the cache, got %s instead", version, pv.ResourceVersion)
}
}
}
// TestControllerCache tests func storeObjectUpdate()
func TestControllerCache(t *testing.T) {
// Cache under test
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// Store new PV
storeVersion(t, "Step1", c, "1", true)
// Store the same PV
storeVersion(t, "Step2", c, "1", true)
// Store newer PV
storeVersion(t, "Step3", c, "2", true)
// Store older PV - simulating old "PV updated" event or periodic sync with
// old data
storeVersion(t, "Step4", c, "1", false)
// Store newer PV - test integer parsing ("2" > "10" as string,
// while 2 < 10 as integers)
storeVersion(t, "Step5", c, "10", true)
}
func TestControllerCacheParsingError(t *testing.T) {
c := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
// There must be something in the cache to compare with
storeVersion(t, "Step1", c, "1", true)
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = "xxx"
_, err := storeObjectUpdate(c, pv, "volume")
if err == nil {
t.Errorf("Expected parsing error, got nil instead")
}
}
func addVolumeAnnotation(volume *v1.PersistentVolume, annName, annValue string) *v1.PersistentVolume {
if volume.Annotations == nil {
volume.Annotations = make(map[string]string)
}
volume.Annotations[annName] = annValue
return volume
}
func makePVCClass(scName *string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: scName,
},
}
}
func makeStorageClass(scName string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
return &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: scName,
},
VolumeBindingMode: mode,
}
}
func TestDelayBinding(t *testing.T) {
var (
classNotHere = "not-here"
classNoMode = "no-mode"
classImmediateMode = "immediate-mode"
classWaitMode = "wait-mode"
modeImmediate = storagev1.VolumeBindingImmediate
modeWait = storagev1.VolumeBindingWaitForFirstConsumer
)
tests := map[string]struct {
pvc *v1.PersistentVolumeClaim
shouldDelay bool
shouldFail bool
}{
"nil-class": {
pvc: makePVCClass(nil),
shouldDelay: false,
},
"class-not-found": {
pvc: makePVCClass(&classNotHere),
shouldDelay: false,
},
"no-mode-class": {
pvc: makePVCClass(&classNoMode),
shouldDelay: false,
shouldFail: true,
},
"immediate-mode-class": {
pvc: makePVCClass(&classImmediateMode),
shouldDelay: false,
},
"wait-mode-class": {
pvc: makePVCClass(&classWaitMode),
shouldDelay: true,
},
}
classes := []*storagev1.StorageClass{
makeStorageClass(classNoMode, nil),
makeStorageClass(classImmediateMode, &modeImmediate),
makeStorageClass(classWaitMode, &modeWait),
}
client := &fake.Clientset{}
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
classInformer := informerFactory.Storage().V1().StorageClasses()
ctrl := &PersistentVolumeController{
classLister: classInformer.Lister(),
}
for _, class := range classes {
if err := classInformer.Informer().GetIndexer().Add(class); err != nil {
t.Fatalf("Failed to add storage class %q: %v", class.Name, err)
}
}
// When volumeScheduling feature gate is disabled, binding should never be delayed
name := "volumeScheduling-feature-disabled"
shouldDelay, err := ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if shouldDelay {
t.Errorf("Test %q returned true, expected false", name)
}
// Enable volumeScheduling feature gate
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
for name, test := range tests {
shouldDelay, err = ctrl.shouldDelayBinding(test.pvc)
if err != nil && !test.shouldFail {
t.Errorf("Test %q returned error: %v", name, err)
}
if err == nil && test.shouldFail {
t.Errorf("Test %q returned success, expected error", name)
}
if shouldDelay != test.shouldDelay {
t.Errorf("Test %q returned unexpected %v", name, test.shouldDelay)
}
}
// When dynamicProvisioningScheduling feature gate is disabled, should be delayed,
// even if the pvc has selectedNode annotation.
provisionedClaim := makePVCClass(&classWaitMode)
provisionedClaim.Annotations = map[string]string{annSelectedNode: "node-name"}
name = "dynamicProvisioningScheduling-feature-disabled"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}
// Enable DynamicProvisioningScheduling feature gate
utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=true")
defer utilfeature.DefaultFeatureGate.Set("DynamicProvisioningScheduling=false")
// When the pvc does not have selectedNode annotation, should be delayed,
// even if dynamicProvisioningScheduling feature gate is enabled.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-not-set"
shouldDelay, err = ctrl.shouldDelayBinding(makePVCClass(&classWaitMode))
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if !shouldDelay {
t.Errorf("Test %q returned false, expected true", name)
}
// Should not be delayed when dynamicProvisioningScheduling feature gate is enabled,
// and the pvc has selectedNode annotation.
name = "dynamicProvisioningScheduling-feature-enabled, selectedNode-annotation-set"
shouldDelay, err = ctrl.shouldDelayBinding(provisionedClaim)
if err != nil {
t.Errorf("Test %q returned error: %v", name, err)
}
if shouldDelay {
t.Errorf("Test %q returned true, expected false", name)
}
}
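// Illustrative sketch, not part of the original file: the decision exercised by
// TestDelayBinding above, written as a plain helper. The helper name and its
// parameters are hypothetical, and the error cases (e.g. a class with no
// binding mode) as well as the feature gates are omitted. Binding is delayed
// only for WaitForFirstConsumer classes, and not once a node has already been
// selected for provisioning.
func wantsDelayedBinding(mode *storagev1.VolumeBindingMode, hasSelectedNode bool) bool {
	if mode == nil || *mode != storagev1.VolumeBindingWaitForFirstConsumer {
		return false
	}
	return !hasSelectedNode
}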

View File

@ -1,266 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"errors"
"testing"
"k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Test single call to syncVolume, expecting recycling to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestRecycleSync(t *testing.T) {
runningPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "runningPod",
Namespace: testNamespace,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "runningClaim",
},
},
},
},
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
}
pendingPod := runningPod.DeepCopy()
pendingPod.Name = "pendingPod"
pendingPod.Status.Phase = v1.PodPending
pendingPod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = "pendingClaim"
completedPod := runningPod.DeepCopy()
completedPod.Name = "completedPod"
completedPod.Status.Phase = v1.PodSucceeded
completedPod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = "completedClaim"
pods := []*v1.Pod{
runningPod,
pendingPod,
completedPod,
}
tests := []controllerTest{
{
// recycle volume bound by controller
"6-1 - successful recycle",
newVolumeArray("volume6-1", "1Gi", "uid6-1", "claim6-1", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
newVolumeArray("volume6-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle volume bound by user
"6-2 - successful recycle with prebound volume",
newVolumeArray("volume6-2", "1Gi", "uid6-2", "claim6-2", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
newVolumeArray("volume6-2", "1Gi", "", "claim6-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// recycle failure - plugin not found
"6-3 - plugin not found",
newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
withMessage("No recycler plugin found for the volume!", newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle, classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume,
},
{
// recycle failure - newRecycler returns error
"6-4 - newRecycler returns error",
newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
withMessage("Recycle failed: Mock plugin error: no recycleCalls configured", newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle, classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume),
},
{
// recycle failure - recycle returns error
"6-5 - recycle returns error",
newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
withMessage("Recycle failed: Mock recycle error", newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", v1.VolumeFailed, v1.PersistentVolumeReclaimRecycle, classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume),
},
{
// recycle success(?) - volume is deleted before doRecycle() starts
"6-6 - volume is deleted before recycling",
newVolumeArray("volume6-6", "1Gi", "uid6-6", "claim6-6", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
novolumes,
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Delete the volume before recycle operation starts
reactor.lock.Lock()
delete(reactor.volumes, "volume6-6")
reactor.lock.Unlock()
}),
},
{
// recycle success(?) - volume is recycled by previous recycler just
// at the time new doRecycle() starts. This simulates "volume no
// longer needs recycling, skipping".
"6-7 - volume is deleted before recycling",
newVolumeArray("volume6-7", "1Gi", "uid6-7", "claim6-7", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
newVolumeArray("volume6-7", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-7"]
volume.Spec.ClaimRef = nil
volume.Status.Phase = v1.VolumeAvailable
volume.Annotations = nil
reactor.lock.Unlock()
}),
},
{
// recycle success(?) - volume bound by user is recycled by previous
// recycler just at the time new doRecycle() starts. This simulates
// "volume no longer needs recycling, skipping" with volume bound by
// user.
"6-8 - prebound volume is deleted before recycling",
newVolumeArray("volume6-8", "1Gi", "uid6-8", "claim6-8", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
newVolumeArray("volume6-8", "1Gi", "", "claim6-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
noclaims,
noclaims,
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.lock.Lock()
volume := reactor.volumes["volume6-8"]
volume.Spec.ClaimRef.UID = ""
volume.Status.Phase = v1.VolumeAvailable
reactor.lock.Unlock()
}),
},
{
// recycle success - volume bound by user is recycled, while a new
// claim is created with another UID.
"6-9 - prebound volume is recycled while the claim exists",
newVolumeArray("volume6-9", "1Gi", "uid6-9", "claim6-9", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
newVolumeArray("volume6-9", "1Gi", "", "claim6-9", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", v1.ClaimPending, nil),
newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", v1.ClaimPending, nil),
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
{
// volume has unknown reclaim policy - failure expected
"6-10 - unknown reclaim policy",
newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", v1.VolumeBound, "Unknown", classEmpty),
withMessage("Volume has unrecognized PersistentVolumeReclaimPolicy", newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", v1.VolumeFailed, "Unknown", classEmpty)),
noclaims,
noclaims,
[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
},
{
// volume is used by a running pod - failure expected
"6-11 - used by running pod",
newVolumeArray("volume6-11", "1Gi", "uid6-11", "runningClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
newVolumeArray("volume6-11", "1Gi", "uid6-11", "runningClaim", v1.VolumeReleased, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
noclaims,
noclaims,
[]string{"Normal VolumeFailedRecycle"}, noerrors, testSyncVolume,
},
{
// volume is used by a pending pod - failure expected
"6-12 - used by pending pod",
newVolumeArray("volume6-12", "1Gi", "uid6-12", "pendingClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
newVolumeArray("volume6-12", "1Gi", "uid6-12", "pendingClaim", v1.VolumeReleased, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
noclaims,
noclaims,
[]string{"Normal VolumeFailedRecycle"}, noerrors, testSyncVolume,
},
{
// volume is used by a completed pod - recycle succeeds
"6-13 - used by completed pod",
newVolumeArray("volume6-13", "1Gi", "uid6-13", "completedClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
newVolumeArray("volume6-13", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
noclaims,
noclaims,
noevents, noerrors,
// Inject recycler into the controller and call syncVolume. The
// recycler simulates one recycle() call that succeeds.
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
}
runSyncTests(t, tests, []*storage.StorageClass{}, pods)
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of sync calls is enforced to prevent endless loops;
// a compact sketch of this loop follows TestRecycleMultiSync below.
func TestRecycleMultiSync(t *testing.T) {
tests := []controllerTest{
{
// recycle failure - recycle returns error. The controller should
// try again.
"7-1 - recycle returns error",
newVolumeArray("volume7-1", "1Gi", "uid7-1", "claim7-1", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty),
newVolumeArray("volume7-1", "1Gi", "", "claim7-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
noclaims,
noclaims,
[]string{"Warning VolumeFailedRecycle"}, noerrors,
wrapTestWithReclaimCalls(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
}
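// Illustrative sketch, not part of the original file: the loop described in
// the comment above TestRecycleMultiSync, with the framework details
// abstracted away. The helper name and the function-valued parameters are
// hypothetical; each callback reports whether it changed any volume or claim.
func multisyncLoop(syncChanged, syncAll func() bool, maxSyncs int) bool {
	for i := 0; i < maxSyncs; i++ {
		// Step 2: sync everything changed by the previous round of calls.
		if syncChanged() {
			continue
		}
		// Step 3: nothing changed, simulate a periodic sync of all objects.
		if syncAll() {
			continue // Step 4: the periodic sync changed something, go again.
		}
		// Step 5: no changes at all; the caller can compare the final state.
		return true
	}
	// The call limit was hit, which the tests treat as an endless loop.
	return false
}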

View File

@ -1,404 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"strconv"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/tools/cache"
)
// AssumeCache is a cache on top of the informer that allows for updating
// objects outside of informer events and also restoring the informer
// cache's version of the object. Objects are assumed to be
// Kubernetes API objects that implement meta.Interface
type AssumeCache interface {
// Assume updates the object in-memory only
Assume(obj interface{}) error
// Restore the informer cache's version of the object
Restore(objName string)
// Get the object by name
Get(objName string) (interface{}, error)
// List all the objects in the cache
List(indexObj interface{}) []interface{}
}
type errWrongType struct {
typeName string
object interface{}
}
func (e *errWrongType) Error() string {
return fmt.Sprintf("could not convert object to type %v: %+v", e.typeName, e.object)
}
type errNotFound struct {
typeName string
objectName string
}
func (e *errNotFound) Error() string {
return fmt.Sprintf("could not find %v %q", e.typeName, e.objectName)
}
type errObjectName struct {
detailedErr error
}
func (e *errObjectName) Error() string {
return fmt.Sprintf("failed to get object name: %v", e.detailedErr)
}
// assumeCache stores two pointers to represent a single object:
// * The pointer to the informer object.
// * The pointer to the latest object, which could be the same as
// the informer object, or an in-memory object.
//
// An informer update always overrides the latest object pointer.
//
// Assume() only updates the latest object pointer.
// Restore() sets the latest object pointer back to the informer object.
// Get/List() always returns the latest object pointer.
type assumeCache struct {
mutex sync.Mutex
// describes the object stored
description string
// Stores objInfo pointers
store cache.Indexer
// Index function for object
indexFunc cache.IndexFunc
indexName string
}
type objInfo struct {
// name of the object
name string
// Latest version of object could be cached-only or from informer
latestObj interface{}
// Latest object from informer
apiObj interface{}
}
func objInfoKeyFunc(obj interface{}) (string, error) {
objInfo, ok := obj.(*objInfo)
if !ok {
return "", &errWrongType{"objInfo", obj}
}
return objInfo.name, nil
}
func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
objInfo, ok := obj.(*objInfo)
if !ok {
return []string{""}, &errWrongType{"objInfo", obj}
}
return c.indexFunc(objInfo.latestObj)
}
func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) *assumeCache {
c := &assumeCache{
description: description,
indexFunc: indexFunc,
indexName: indexName,
}
c.store = cache.NewIndexer(objInfoKeyFunc, cache.Indexers{indexName: c.objInfoIndexFunc})
// Unit tests don't use informers
if informer != nil {
informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.add,
UpdateFunc: c.update,
DeleteFunc: c.delete,
},
)
}
return c
}
func (c *assumeCache) add(obj interface{}) {
if obj == nil {
return
}
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
glog.Errorf("add failed: %v", &errObjectName{err})
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
glog.Errorf("add: couldn't get object version: %v", err)
return
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
glog.Errorf("add: couldn't get stored object version: %v", err)
return
}
// Only update object if version is newer.
// This is so we don't override assumed objects due to informer resync.
if newVersion <= storedVersion {
glog.V(10).Infof("Skip adding %v %v to assume cache because version %v is not newer than %v", c.description, name, newVersion, storedVersion)
return
}
}
objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
c.store.Update(objInfo)
glog.V(10).Infof("Adding %v %v to assume cache: %+v ", c.description, name, obj)
}
func (c *assumeCache) update(oldObj interface{}, newObj interface{}) {
c.add(newObj)
}
func (c *assumeCache) delete(obj interface{}) {
if obj == nil {
return
}
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
glog.Errorf("delete failed: %v", &errObjectName{err})
return
}
c.mutex.Lock()
defer c.mutex.Unlock()
objInfo := &objInfo{name: name}
err = c.store.Delete(objInfo)
if err != nil {
glog.Errorf("delete: failed to delete %v %v: %v", c.description, name, err)
}
}
func (c *assumeCache) getObjVersion(name string, obj interface{}) (int64, error) {
objAccessor, err := meta.Accessor(obj)
if err != nil {
return -1, err
}
objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
if err != nil {
return -1, fmt.Errorf("error parsing ResourceVersion %q for %v %q: %s", objAccessor.GetResourceVersion(), c.description, name, err)
}
return objResourceVersion, nil
}
func (c *assumeCache) getObjInfo(name string) (*objInfo, error) {
obj, ok, err := c.store.GetByKey(name)
if err != nil {
return nil, err
}
if !ok {
return nil, &errNotFound{c.description, name}
}
objInfo, ok := obj.(*objInfo)
if !ok {
return nil, &errWrongType{"objInfo", obj}
}
return objInfo, nil
}
func (c *assumeCache) Get(objName string) (interface{}, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
return nil, err
}
return objInfo.latestObj, nil
}
func (c *assumeCache) List(indexObj interface{}) []interface{} {
c.mutex.Lock()
defer c.mutex.Unlock()
allObjs := []interface{}{}
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
glog.Errorf("list index error: %v", err)
return nil
}
for _, obj := range objs {
objInfo, ok := obj.(*objInfo)
if !ok {
glog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
continue
}
allObjs = append(allObjs, objInfo.latestObj)
}
return allObjs
}
func (c *assumeCache) Assume(obj interface{}) error {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
return &errObjectName{err}
}
c.mutex.Lock()
defer c.mutex.Unlock()
objInfo, err := c.getObjInfo(name)
if err != nil {
return err
}
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
return err
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
return err
}
if newVersion < storedVersion {
return fmt.Errorf("%v %q is out of sync", c.description, name)
}
// Only update the cached object
objInfo.latestObj = obj
glog.V(4).Infof("Assumed %v %q, version %v", c.description, name, newVersion)
return nil
}
func (c *assumeCache) Restore(objName string) {
c.mutex.Lock()
defer c.mutex.Unlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
// This could be expected if object got deleted
glog.V(5).Infof("Restore %v %q warning: %v", c.description, objName, err)
} else {
objInfo.latestObj = objInfo.apiObj
glog.V(4).Infof("Restored %v %q", c.description, objName)
}
}
// PVAssumeCache is an AssumeCache for PersistentVolume objects
type PVAssumeCache interface {
AssumeCache
GetPV(pvName string) (*v1.PersistentVolume, error)
ListPVs(storageClassName string) []*v1.PersistentVolume
}
type pvAssumeCache struct {
*assumeCache
}
func pvStorageClassIndexFunc(obj interface{}) ([]string, error) {
if pv, ok := obj.(*v1.PersistentVolume); ok {
return []string{pv.Spec.StorageClassName}, nil
}
return []string{""}, fmt.Errorf("object is not a v1.PersistentVolume: %v", obj)
}
func NewPVAssumeCache(informer cache.SharedIndexInformer) PVAssumeCache {
return &pvAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc)}
}
func (c *pvAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) {
obj, err := c.Get(pvName)
if err != nil {
return nil, err
}
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
return nil, &errWrongType{"v1.PersistentVolume", obj}
}
return pv, nil
}
func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume {
objs := c.List(&v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
StorageClassName: storageClassName,
},
})
pvs := []*v1.PersistentVolume{}
for _, obj := range objs {
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
glog.Errorf("ListPVs: %v", &errWrongType{"v1.PersistentVolume", obj})
}
pvs = append(pvs, pv)
}
return pvs
}
// PVCAssumeCache is an AssumeCache for PersistentVolumeClaim objects
type PVCAssumeCache interface {
AssumeCache
// GetPVC returns the PVC from the cache whose namespace and name match
// the given key.
// pvcKey is the result of MetaNamespaceKeyFunc on the PVC object
GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error)
}
type pvcAssumeCache struct {
*assumeCache
}
func NewPVCAssumeCache(informer cache.SharedIndexInformer) PVCAssumeCache {
return &pvcAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolumeClaim", "namespace", cache.MetaNamespaceIndexFunc)}
}
func (c *pvcAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
obj, err := c.Get(pvcKey)
if err != nil {
return nil, err
}
pvc, ok := obj.(*v1.PersistentVolumeClaim)
if !ok {
return nil, &errWrongType{"v1.PersistentVolumeClaim", obj}
}
return pvc, nil
}
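// Illustrative sketch, not part of the original file: a typical assume/restore
// round trip against a PVAssumeCache. The function name, the informer argument
// and the object names are hypothetical.
func examplePVAssume(pvInformer cache.SharedIndexInformer) error {
	pvCache := NewPVAssumeCache(pvInformer)
	// Get always returns the latest pointer: the informer's object or an
	// assumed in-memory copy.
	pv, err := pvCache.GetPV("pv-1")
	if err != nil {
		return err
	}
	// Assume an updated copy, e.g. a binding decision made by the scheduler.
	bound := pv.DeepCopy()
	bound.Spec.ClaimRef = &v1.ObjectReference{Namespace: "ns-1", Name: "claim-1"}
	if err := pvCache.Assume(bound); err != nil {
		return err
	}
	// If the decision is later rejected, drop back to the informer's version.
	pvCache.Restore(bound.Name)
	return nil
}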

View File

@ -1,472 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func makePV(name, version, storageClass string) *v1.PersistentVolume {
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: name,
ResourceVersion: version,
},
Spec: v1.PersistentVolumeSpec{
StorageClassName: storageClass,
},
}
}
func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) {
pvList := cache.ListPVs(storageClassName)
if len(pvList) != len(expectedPVs) {
t.Errorf("ListPVs() returned %v PVs, expected %v", len(pvList), len(expectedPVs))
}
for _, pv := range pvList {
expectedPV, ok := expectedPVs[pv.Name]
if !ok {
t.Errorf("ListPVs() returned unexpected PV %q", pv.Name)
}
if expectedPV != pv {
t.Errorf("ListPVs() returned PV %p, expected %p", pv, expectedPV)
}
}
}
func verifyPV(cache PVAssumeCache, name string, expectedPV *v1.PersistentVolume) error {
pv, err := cache.GetPV(name)
if err != nil {
return err
}
if pv != expectedPV {
return fmt.Errorf("GetPV() returned %p, expected %p", pv, expectedPV)
}
return nil
}
func TestAssumePV(t *testing.T) {
scenarios := map[string]struct {
oldPV *v1.PersistentVolume
newPV *v1.PersistentVolume
shouldSucceed bool
}{
"success-same-version": {
oldPV: makePV("pv1", "5", ""),
newPV: makePV("pv1", "5", ""),
shouldSucceed: true,
},
"success-storageclass-same-version": {
oldPV: makePV("pv1", "5", "class1"),
newPV: makePV("pv1", "5", "class1"),
shouldSucceed: true,
},
"success-new-higher-version": {
oldPV: makePV("pv1", "5", ""),
newPV: makePV("pv1", "6", ""),
shouldSucceed: true,
},
"fail-old-not-found": {
oldPV: makePV("pv2", "5", ""),
newPV: makePV("pv1", "5", ""),
shouldSucceed: false,
},
"fail-new-lower-version": {
oldPV: makePV("pv1", "5", ""),
newPV: makePV("pv1", "4", ""),
shouldSucceed: false,
},
"fail-new-bad-version": {
oldPV: makePV("pv1", "5", ""),
newPV: makePV("pv1", "a", ""),
shouldSucceed: false,
},
"fail-old-bad-version": {
oldPV: makePV("pv1", "a", ""),
newPV: makePV("pv1", "5", ""),
shouldSucceed: false,
},
}
for name, scenario := range scenarios {
cache := NewPVAssumeCache(nil)
internal_cache, ok := cache.(*pvAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
// Add oldPV to cache
internal_cache.add(scenario.oldPV)
if err := verifyPV(cache, scenario.oldPV.Name, scenario.oldPV); err != nil {
t.Errorf("Failed to GetPV() after initial update: %v", err)
continue
}
// Assume newPV
err := cache.Assume(scenario.newPV)
if scenario.shouldSucceed && err != nil {
t.Errorf("Test %q failed: Assume() returned error %v", name, err)
}
if !scenario.shouldSucceed && err == nil {
t.Errorf("Test %q failed: Assume() returned success but expected error", name)
}
// Check that GetPV returns correct PV
expectedPV := scenario.newPV
if !scenario.shouldSucceed {
expectedPV = scenario.oldPV
}
if err := verifyPV(cache, scenario.oldPV.Name, expectedPV); err != nil {
t.Errorf("Failed to GetPV() after initial update: %v", err)
}
}
}
func TestRestorePV(t *testing.T) {
cache := NewPVAssumeCache(nil)
internal_cache, ok := cache.(*pvAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
oldPV := makePV("pv1", "5", "")
newPV := makePV("pv1", "5", "")
// Restore PV that doesn't exist
cache.Restore("nothing")
// Add oldPV to cache
internal_cache.add(oldPV)
if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
t.Fatalf("Failed to GetPV() after initial update: %v", err)
}
// Restore PV
cache.Restore(oldPV.Name)
if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
t.Fatalf("Failed to GetPV() after iniital restore: %v", err)
}
// Assume newPV
if err := cache.Assume(newPV); err != nil {
t.Fatalf("Assume() returned error %v", err)
}
if err := verifyPV(cache, oldPV.Name, newPV); err != nil {
t.Fatalf("Failed to GetPV() after Assume: %v", err)
}
// Restore PV
cache.Restore(oldPV.Name)
if err := verifyPV(cache, oldPV.Name, oldPV); err != nil {
t.Fatalf("Failed to GetPV() after restore: %v", err)
}
}
func TestBasicPVCache(t *testing.T) {
cache := NewPVAssumeCache(nil)
internal_cache, ok := cache.(*pvAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
// Get object that doesn't exist
pv, err := cache.GetPV("nothere")
if err == nil {
t.Errorf("GetPV() returned unexpected success")
}
if pv != nil {
t.Errorf("GetPV() returned unexpected PV %q", pv.Name)
}
// Add a bunch of PVs
pvs := map[string]*v1.PersistentVolume{}
for i := 0; i < 10; i++ {
pv := makePV(fmt.Sprintf("test-pv%v", i), "1", "")
pvs[pv.Name] = pv
internal_cache.add(pv)
}
// List them
verifyListPVs(t, cache, pvs, "")
// Update a PV
updatedPV := makePV("test-pv3", "2", "")
pvs[updatedPV.Name] = updatedPV
internal_cache.update(nil, updatedPV)
// List them
verifyListPVs(t, cache, pvs, "")
// Delete a PV
deletedPV := pvs["test-pv7"]
delete(pvs, deletedPV.Name)
internal_cache.delete(deletedPV)
// List them
verifyListPVs(t, cache, pvs, "")
}
func TestPVCacheWithStorageClasses(t *testing.T) {
cache := NewPVAssumeCache(nil)
internal_cache, ok := cache.(*pvAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
// Add a bunch of PVs
pvs1 := map[string]*v1.PersistentVolume{}
for i := 0; i < 10; i++ {
pv := makePV(fmt.Sprintf("test-pv%v", i), "1", "class1")
pvs1[pv.Name] = pv
internal_cache.add(pv)
}
// Add a bunch of PVs
pvs2 := map[string]*v1.PersistentVolume{}
for i := 0; i < 10; i++ {
pv := makePV(fmt.Sprintf("test2-pv%v", i), "1", "class2")
pvs2[pv.Name] = pv
internal_cache.add(pv)
}
// List them
verifyListPVs(t, cache, pvs1, "class1")
verifyListPVs(t, cache, pvs2, "class2")
// Update a PV
updatedPV := makePV("test-pv3", "2", "class1")
pvs1[updatedPV.Name] = updatedPV
internal_cache.update(nil, updatedPV)
// List them
verifyListPVs(t, cache, pvs1, "class1")
verifyListPVs(t, cache, pvs2, "class2")
// Delete a PV
deletedPV := pvs1["test-pv7"]
delete(pvs1, deletedPV.Name)
internal_cache.delete(deletedPV)
// List them
verifyListPVs(t, cache, pvs1, "class1")
verifyListPVs(t, cache, pvs2, "class2")
}
func TestAssumeUpdatePVCache(t *testing.T) {
cache := NewPVAssumeCache(nil)
internal_cache, ok := cache.(*pvAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
pvName := "test-pv0"
// Add a PV
pv := makePV(pvName, "1", "")
internal_cache.add(pv)
if err := verifyPV(cache, pvName, pv); err != nil {
t.Fatalf("failed to get PV: %v", err)
}
// Assume PV
newPV := pv.DeepCopy()
newPV.Spec.ClaimRef = &v1.ObjectReference{Name: "test-claim"}
if err := cache.Assume(newPV); err != nil {
t.Fatalf("failed to assume PV: %v", err)
}
if err := verifyPV(cache, pvName, newPV); err != nil {
t.Fatalf("failed to get PV after assume: %v", err)
}
// Add old PV
internal_cache.add(pv)
if err := verifyPV(cache, pvName, newPV); err != nil {
t.Fatalf("failed to get PV after old PV added: %v", err)
}
}
func makeClaim(name, version, namespace string) *v1.PersistentVolumeClaim {
return &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
ResourceVersion: version,
Annotations: map[string]string{},
},
}
}
func verifyPVC(cache PVCAssumeCache, pvcKey string, expectedPVC *v1.PersistentVolumeClaim) error {
pvc, err := cache.GetPVC(pvcKey)
if err != nil {
return err
}
if pvc != expectedPVC {
return fmt.Errorf("GetPVC() returned %p, expected %p", pvc, expectedPVC)
}
return nil
}
func TestAssumePVC(t *testing.T) {
scenarios := map[string]struct {
oldPVC *v1.PersistentVolumeClaim
newPVC *v1.PersistentVolumeClaim
shouldSucceed bool
}{
"success-same-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "5", "ns1"),
shouldSucceed: true,
},
"success-new-higher-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "6", "ns1"),
shouldSucceed: true,
},
"fail-old-not-found": {
oldPVC: makeClaim("pvc2", "5", "ns1"),
newPVC: makeClaim("pvc1", "5", "ns1"),
shouldSucceed: false,
},
"fail-new-lower-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "4", "ns1"),
shouldSucceed: false,
},
"fail-new-bad-version": {
oldPVC: makeClaim("pvc1", "5", "ns1"),
newPVC: makeClaim("pvc1", "a", "ns1"),
shouldSucceed: false,
},
"fail-old-bad-version": {
oldPVC: makeClaim("pvc1", "a", "ns1"),
newPVC: makeClaim("pvc1", "5", "ns1"),
shouldSucceed: false,
},
}
for name, scenario := range scenarios {
cache := NewPVCAssumeCache(nil)
internal_cache, ok := cache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
// Add oldPVC to cache
internal_cache.add(scenario.oldPVC)
if err := verifyPVC(cache, getPVCName(scenario.oldPVC), scenario.oldPVC); err != nil {
t.Errorf("Failed to GetPVC() after initial update: %v", err)
continue
}
// Assume newPVC
err := cache.Assume(scenario.newPVC)
if scenario.shouldSucceed && err != nil {
t.Errorf("Test %q failed: Assume() returned error %v", name, err)
}
if !scenario.shouldSucceed && err == nil {
t.Errorf("Test %q failed: Assume() returned success but expected error", name)
}
// Check that GetPVC returns correct PVC
expectedPVC := scenario.newPVC
if !scenario.shouldSucceed {
expectedPVC = scenario.oldPVC
}
if err := verifyPVC(cache, getPVCName(scenario.oldPVC), expectedPVC); err != nil {
t.Errorf("Failed to GetPVC() after Assume: %v", err)
}
}
}
func TestRestorePVC(t *testing.T) {
cache := NewPVCAssumeCache(nil)
internal_cache, ok := cache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
oldPVC := makeClaim("pvc1", "5", "ns1")
newPVC := makeClaim("pvc1", "5", "ns1")
// Restore PVC that doesn't exist
cache.Restore("nothing")
// Add oldPVC to cache
internal_cache.add(oldPVC)
if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
t.Fatalf("Failed to GetPVC() after initial update: %v", err)
}
// Restore PVC
cache.Restore(getPVCName(oldPVC))
if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
t.Fatalf("Failed to GetPVC() after iniital restore: %v", err)
}
// Assume newPVC
if err := cache.Assume(newPVC); err != nil {
t.Fatalf("Assume() returned error %v", err)
}
if err := verifyPVC(cache, getPVCName(oldPVC), newPVC); err != nil {
t.Fatalf("Failed to GetPVC() after Assume: %v", err)
}
// Restore PVC
cache.Restore(getPVCName(oldPVC))
if err := verifyPVC(cache, getPVCName(oldPVC), oldPVC); err != nil {
t.Fatalf("Failed to GetPVC() after restore: %v", err)
}
}
func TestAssumeUpdatePVCCache(t *testing.T) {
cache := NewPVCAssumeCache(nil)
internal_cache, ok := cache.(*pvcAssumeCache)
if !ok {
t.Fatalf("Failed to get internal cache")
}
pvcName := "test-pvc0"
pvcNamespace := "test-ns"
// Add a PVC
pvc := makeClaim(pvcName, "1", pvcNamespace)
internal_cache.add(pvc)
if err := verifyPVC(cache, getPVCName(pvc), pvc); err != nil {
t.Fatalf("failed to get PVC: %v", err)
}
// Assume PVC
newPVC := pvc.DeepCopy()
newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
if err := cache.Assume(newPVC); err != nil {
t.Fatalf("failed to assume PVC: %v", err)
}
if err := verifyPVC(cache, getPVCName(pvc), newPVC); err != nil {
t.Fatalf("failed to get PVC after assume: %v", err)
}
// Add old PVC
internal_cache.add(pvc)
if err := verifyPVC(cache, getPVCName(pvc), newPVC); err != nil {
t.Fatalf("failed to get PVC after old PVC added: %v", err)
}
}

View File

@ -1,553 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"sort"
"github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// SchedulerVolumeBinder is used by the scheduler to handle PVC/PV binding
// and dynamic provisioning. The binding decisions are integrated into the pod scheduling
// workflow so that the PV NodeAffinity is also considered along with the pod's other
// scheduling requirements.
//
// This integrates into the existing default scheduler workflow as follows:
// 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
// a. Invokes all predicate functions, parallelized across nodes. FindPodVolumes() is invoked here.
// b. Invokes all priority functions. Future/TBD
// c. Selects the best node for the Pod.
// d. Cache the node selection for the Pod. (Assume phase)
// i. If PVC binding is required, cache in-memory only:
// * Updated PV objects for prebinding to the corresponding PVCs.
// * For the pod, which PVs need API updates.
// AssumePodVolumes() is invoked here. Then BindPodVolumes() is called asynchronously by the
// scheduler. After BindPodVolumes() is complete, the Pod is added back to the scheduler queue
// to be processed again until all PVCs are bound.
// ii. If PVC binding is not required, cache the Pod->Node binding in the scheduler's pod cache,
// and asynchronously bind the Pod to the Node. This is handled in the scheduler and not here.
// 2. Once the assume operation is done, the scheduler processes the next Pod in the scheduler queue
// while the actual binding operation occurs in the background.
type SchedulerVolumeBinder interface {
// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the node.
//
// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
// Otherwise, it tries to find an available PV to bind to the PVC.
//
// It returns unboundVolumesSatisfied=true if all of the Pod's unbound PVCs have matching PVs
// or can be dynamically provisioned, and boundVolumesSatisfied=true if the already-bound
// volumes satisfy the PV NodeAffinity for the given node.
//
// This function is called by the volume binding scheduler predicate and can be called in parallel.
FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error)
// AssumePodVolumes will:
// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
// that the PV is prebound to the PVC.
// 2. Take the PVCs that need provisioning and update the PVC cache with related
// annotations set.
//
// It returns allFullyBound=true if all volumes are already fully bound, and bindingRequired=true
// if any volume binding/provisioning API operation still needs to be done afterwards.
//
// This function will modify assumedPod with the node name.
// This function is called serially.
AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound bool, bindingRequired bool, err error)
// BindPodVolumes will:
// 1. Initiate the volume binding by making the API call to prebind the PV
// to its matching PVC.
// 2. Trigger the volume provisioning by making the API call to set related
// annotations on the PVC
//
// This function can be called in parallel.
BindPodVolumes(assumedPod *v1.Pod) error
// GetBindingsCache returns the cache used (if any) to store volume binding decisions.
GetBindingsCache() PodBindingCache
}
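// Editorial sketch, not part of the original file: it illustrates, under assumed
// conditions (a single pre-selected node and synchronous binding), how a caller
// might drive the SchedulerVolumeBinder workflow documented above. The function
// and variable names are hypothetical.
func exampleScheduleVolumesSketch(binder SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) error {
	// Predicate phase: check whether the node can satisfy the pod's PVCs.
	unboundOK, boundOK, err := binder.FindPodVolumes(pod, node)
	if err != nil {
		return err
	}
	if !unboundOK || !boundOK {
		return fmt.Errorf("node %q cannot satisfy volumes of pod %q", node.Name, getPodName(pod))
	}
	// Assume phase: record the binding decisions in the in-memory caches.
	allBound, bindingRequired, err := binder.AssumePodVolumes(pod, node.Name)
	if err != nil {
		return err
	}
	if allBound || !bindingRequired {
		// Everything is already bound, or no API update is needed.
		return nil
	}
	// Bind phase: issue the PV/PVC API updates. The real scheduler calls this
	// asynchronously and requeues the pod until all PVCs are bound.
	return binder.BindPodVolumes(pod)
}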
type volumeBinder struct {
ctrl *PersistentVolumeController
pvcCache PVCAssumeCache
pvCache PVAssumeCache
// Stores binding decisions that were made in FindPodVolumes for use in AssumePodVolumes.
// AssumePodVolumes modifies the bindings again for use in BindPodVolumes.
podBindingCache PodBindingCache
}
// NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
func NewVolumeBinder(
kubeClient clientset.Interface,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
pvInformer coreinformers.PersistentVolumeInformer,
storageClassInformer storageinformers.StorageClassInformer) SchedulerVolumeBinder {
// TODO: find better way...
ctrl := &PersistentVolumeController{
kubeClient: kubeClient,
classLister: storageClassInformer.Lister(),
}
b := &volumeBinder{
ctrl: ctrl,
pvcCache: NewPVCAssumeCache(pvcInformer.Informer()),
pvCache: NewPVAssumeCache(pvInformer.Informer()),
podBindingCache: NewPodBindingCache(),
}
return b
}
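// Editorial usage sketch, not part of the original file: assuming a clientset
// `client` and a shared informer factory from "k8s.io/client-go/informers",
// the binder would typically be wired up roughly like this:
//
//	factory := informers.NewSharedInformerFactory(client, 0)
//	binder := NewVolumeBinder(
//		client,
//		factory.Core().V1().PersistentVolumeClaims(),
//		factory.Core().V1().PersistentVolumes(),
//		factory.Storage().V1().StorageClasses(),
//	)
//	factory.Start(stopCh) // stopCh is an assumed stop channel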
func (b *volumeBinder) GetBindingsCache() PodBindingCache {
return b.podBindingCache
}
// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache
func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
podName := getPodName(pod)
// Warning: Below log needs high verbosity as it can be printed several times (#60933).
glog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
// Initialize to true for pods that don't have volumes
unboundVolumesSatisfied = true
boundVolumesSatisfied = true
// The pod's volumes need to be processed in one call to avoid the race condition where
// volumes can get bound/provisioned in between calls.
boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod)
if err != nil {
return false, false, err
}
// Immediate claims should be bound
if len(unboundClaimsImmediate) > 0 {
return false, false, fmt.Errorf("pod has unbound PersistentVolumeClaims")
}
// Check PV node affinity on bound volumes
if len(boundClaims) > 0 {
boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
if err != nil {
return false, false, err
}
}
if len(claimsToBind) > 0 {
var claimsToProvision []*v1.PersistentVolumeClaim
unboundVolumesSatisfied, claimsToProvision, err = b.findMatchingVolumes(pod, claimsToBind, node)
if err != nil {
return false, false, err
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
// Try to provision for unbound volumes
if !unboundVolumesSatisfied {
unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
if err != nil {
return false, false, err
}
}
}
}
return unboundVolumesSatisfied, boundVolumesSatisfied, nil
}
// AssumePodVolumes will take the cached matching PVs and PVCs to provision
// in podBindingCache for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
// 2. Update the pvcCache with the new PVCs with annotations set
// It will update podBindingCache again with the PVs and PVCs that need an API update.
func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (allFullyBound, bindingRequired bool, err error) {
podName := getPodName(assumedPod)
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q", podName, nodeName)
if allBound := b.arePodVolumesBound(assumedPod); allBound {
glog.V(4).Infof("AssumePodVolumes for pod %q, node %q: all PVCs bound and nothing to do", podName, nodeName)
return true, false, nil
}
assumedPod.Spec.NodeName = nodeName
// Assume PV
claimsToBind := b.podBindingCache.GetBindings(assumedPod, nodeName)
newBindings := []*bindingInfo{}
for _, binding := range claimsToBind {
newPV, dirty, err := b.ctrl.getBindVolumeToClaim(binding.pv, binding.pvc)
glog.V(5).Infof("AssumePodVolumes: getBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
podName,
binding.pv.Name,
binding.pvc.Name,
newPV,
dirty,
err)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, true, err
}
if dirty {
err = b.pvCache.Assume(newPV)
if err != nil {
b.revertAssumedPVs(newBindings)
return false, true, err
}
newBindings = append(newBindings, &bindingInfo{pv: newPV, pvc: binding.pvc})
}
}
// Don't update cached bindings if no API updates are needed. This can happen if we
// previously updated the PV object and are waiting for the PV controller to finish binding.
if len(newBindings) != 0 {
bindingRequired = true
b.podBindingCache.UpdateBindings(assumedPod, nodeName, newBindings)
}
// Assume PVCs
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, nodeName)
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
for _, claim := range claimsToProvision {
// The claims may point to objects owned by the informer's watch cache. We must not
// modify those, therefore create a copy.
claimClone := claim.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, nodeName)
err = b.pvcCache.Assume(claimClone)
if err != nil {
b.revertAssumedPVs(newBindings)
b.revertAssumedPVCs(newProvisionedPVCs)
return
}
newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
}
if len(newProvisionedPVCs) != 0 {
bindingRequired = true
b.podBindingCache.UpdateProvisionedPVCs(assumedPod, nodeName, newProvisionedPVCs)
}
return
}
// BindPodVolumes gets the cached bindings and PVCs to provision in podBindingCache
// and makes the API update for those PVs/PVCs.
func (b *volumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
podName := getPodName(assumedPod)
glog.V(4).Infof("BindPodVolumes for pod %q", podName)
bindings := b.podBindingCache.GetBindings(assumedPod, assumedPod.Spec.NodeName)
claimsToProvision := b.podBindingCache.GetProvisionedPVCs(assumedPod, assumedPod.Spec.NodeName)
// Do the actual prebinding. Let the PV controller take care of the rest
// There is no API rollback if the actual binding fails
for i, bindingInfo := range bindings {
glog.V(5).Infof("BindPodVolumes: Pod %q, binding PV %q to PVC %q", podName, bindingInfo.pv.Name, bindingInfo.pvc.Name)
_, err := b.ctrl.updateBindVolumeToClaim(bindingInfo.pv, bindingInfo.pvc, false)
if err != nil {
// only revert assumed cached updates for volumes we haven't successfully bound
b.revertAssumedPVs(bindings[i:])
// Revert all of the assumed cached updates for claims,
// since no actual API update will be done
b.revertAssumedPVCs(claimsToProvision)
return err
}
}
// Update claims objects to trigger volume provisioning. Let the PV controller take care of the rest
// The PV controller is expected to signal back by removing related annotations if actual provisioning fails.
for i, claim := range claimsToProvision {
if _, err := b.ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] failed: %v", getPVCName(claim), err)
// only revert assumed cached updates for claims we haven't successfully updated
b.revertAssumedPVCs(claimsToProvision[i:])
return err
}
}
return nil
}
func getPodName(pod *v1.Pod) string {
return pod.Namespace + "/" + pod.Name
}
func getPVCName(pvc *v1.PersistentVolumeClaim) string {
return pvc.Namespace + "/" + pvc.Name
}
func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume, checkFullyBound bool) (bool, *v1.PersistentVolumeClaim, error) {
if vol.PersistentVolumeClaim == nil {
return true, nil, nil
}
pvcName := vol.PersistentVolumeClaim.ClaimName
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: pvcName,
Namespace: namespace,
},
}
pvc, err := b.pvcCache.GetPVC(getPVCName(claim))
if err != nil || pvc == nil {
return false, nil, fmt.Errorf("error getting PVC %q: %v", pvcName, err)
}
pvName := pvc.Spec.VolumeName
if pvName != "" {
if checkFullyBound {
if metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted) {
glog.V(5).Infof("PVC %q is fully bound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
} else {
glog.V(5).Infof("PVC %q is not fully bound to PV %q", getPVCName(pvc), pvName)
return false, pvc, nil
}
}
glog.V(5).Infof("PVC %q is bound or prebound to PV %q", getPVCName(pvc), pvName)
return true, pvc, nil
}
glog.V(5).Infof("PVC %q is not bound", getPVCName(pvc))
return false, pvc, nil
}
// arePodVolumesBound returns true if all volumes are fully bound
func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
for _, vol := range pod.Spec.Volumes {
if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol, true); !isBound {
// Pod has at least one PVC that needs binding
return false
}
}
return true
}
// getPodVolumes returns a pod's PVCs separated into bound (including prebound), unbound with delayed binding,
// and unbound with immediate binding
func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaims []*bindingInfo, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
boundClaims = []*v1.PersistentVolumeClaim{}
unboundClaimsImmediate = []*v1.PersistentVolumeClaim{}
unboundClaims = []*bindingInfo{}
for _, vol := range pod.Spec.Volumes {
volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol, false)
if err != nil {
return nil, nil, nil, err
}
if pvc == nil {
continue
}
if volumeBound {
boundClaims = append(boundClaims, pvc)
} else {
delayBinding, err := b.ctrl.shouldDelayBinding(pvc)
if err != nil {
return nil, nil, nil, err
}
if delayBinding {
// Scheduler path
unboundClaims = append(unboundClaims, &bindingInfo{pvc: pvc})
} else {
// Immediate binding should have already been bound
unboundClaimsImmediate = append(unboundClaimsImmediate, pvc)
}
}
}
return boundClaims, unboundClaims, unboundClaimsImmediate, nil
}
func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) {
for _, pvc := range claims {
pvName := pvc.Spec.VolumeName
pv, err := b.pvCache.GetPV(pvName)
if err != nil {
return false, err
}
err = volumeutil.CheckNodeAffinity(pv, node.Labels)
if err != nil {
glog.V(4).Infof("PersistentVolume %q, Node %q mismatch for Pod %q: %v", pvName, node.Name, err.Error(), podName)
return false, nil
}
glog.V(5).Infof("PersistentVolume %q, Node %q matches for Pod %q", pvName, node.Name, podName)
}
glog.V(4).Infof("All bound volumes for Pod %q match with Node %q", podName, node.Name)
return true, nil
}
// findMatchingVolumes tries to find matching volumes for given claims,
// and returns the unbound claims for further provisioning.
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingInfo, node *v1.Node) (foundMatches bool, unboundClaims []*v1.PersistentVolumeClaim, err error) {
podName := getPodName(pod)
// Sort all the claims by increasing size request to get the smallest fits
sort.Sort(byPVCSize(claimsToBind))
chosenPVs := map[string]*v1.PersistentVolume{}
foundMatches = true
matchedClaims := []*bindingInfo{}
for _, bindingInfo := range claimsToBind {
// Get storage class name from each PVC
storageClassName := ""
storageClass := bindingInfo.pvc.Spec.StorageClassName
if storageClass != nil {
storageClassName = *storageClass
}
allPVs := b.pvCache.ListPVs(storageClassName)
// Find a matching PV
bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true)
if err != nil {
return false, nil, err
}
if bindingInfo.pv == nil {
glog.V(4).Infof("No matching volumes for Pod %q, PVC %q on node %q", podName, getPVCName(bindingInfo.pvc), node.Name)
unboundClaims = append(unboundClaims, bindingInfo.pvc)
foundMatches = false
continue
}
// matching PV needs to be excluded so we don't select it again
chosenPVs[bindingInfo.pv.Name] = bindingInfo.pv
matchedClaims = append(matchedClaims, bindingInfo)
glog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", bindingInfo.pv.Name, getPVCName(bindingInfo.pvc), node.Name, podName)
}
// Mark cache with all the matches for each PVC for this node
if len(matchedClaims) > 0 {
b.podBindingCache.UpdateBindings(pod, node.Name, matchedClaims)
}
if foundMatches {
glog.V(4).Infof("Found matching volumes for pod %q on node %q", podName, node.Name)
}
return
}
// checkVolumeProvisions checks given unbound claims (the claims have gone through func
// findMatchingVolumes, and do not have matching volumes for binding), and returns true
// if all of the claims are eligible for dynamic provisioning.
func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied bool, err error) {
podName := getPodName(pod)
provisionedClaims := []*v1.PersistentVolumeClaim{}
for _, claim := range claimsToProvision {
className := v1helper.GetPersistentVolumeClaimClass(claim)
if className == "" {
return false, fmt.Errorf("no class for claim %q", getPVCName(claim))
}
class, err := b.ctrl.classLister.Get(className)
if err != nil {
return false, fmt.Errorf("failed to find storage class %q", className)
}
provisioner := class.Provisioner
if provisioner == "" || provisioner == notSupportedProvisioner {
glog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, getPVCName(claim))
return false, nil
}
// Check if the node can satisfy the topology requirement in the class
if !v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, labels.Set(node.Labels)) {
glog.V(4).Infof("Node %q cannot satisfy provisioning topology requirements of claim %q", node.Name, getPVCName(claim))
return false, nil
}
// TODO: Check if capacity of the node domain in the storage class
// can satisfy resource requirement of given claim
provisionedClaims = append(provisionedClaims, claim)
}
glog.V(4).Infof("Provisioning for claims of pod %q that has no matching volumes on node %q ...", podName, node.Name)
// Mark cache with all the PVCs that need provisioning for this node
b.podBindingCache.UpdateProvisionedPVCs(pod, node.Name, provisionedClaims)
return true, nil
}
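// Editorial sketch, not part of the original file: the AllowedTopologies check
// above uses v1helper.MatchTopologySelectorTerms. A minimal illustration,
// assuming storagev1 = "k8s.io/api/storage/v1" and an example zone label key:
//
//	class := &storagev1.StorageClass{
//		AllowedTopologies: []v1.TopologySelectorTerm{{
//			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{{
//				Key:    "failure-domain.beta.kubernetes.io/zone",
//				Values: []string{"zone1"},
//			}},
//		}},
//	}
//	nodeLabels := labels.Set{"failure-domain.beta.kubernetes.io/zone": "zone1"}
//	ok := v1helper.MatchTopologySelectorTerms(class.AllowedTopologies, nodeLabels)
//	// ok is true only when the node's labels fall within the allowed topology.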
func (b *volumeBinder) revertAssumedPVs(bindings []*bindingInfo) {
for _, bindingInfo := range bindings {
b.pvCache.Restore(bindingInfo.pv.Name)
}
}
func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
for _, claim := range claims {
b.pvcCache.Restore(getPVCName(claim))
}
}
type bindingInfo struct {
// Claim that needs to be bound
pvc *v1.PersistentVolumeClaim
// Proposed PV to bind to this claim
pv *v1.PersistentVolume
}
// Used in unit test errors
func (b bindingInfo) String() string {
pvcName := ""
pvName := ""
if b.pvc != nil {
pvcName = getPVCName(b.pvc)
}
if b.pv != nil {
pvName = b.pv.Name
}
return fmt.Sprintf("[PVC %q, PV %q]", pvcName, pvName)
}
type byPVCSize []*bindingInfo
func (a byPVCSize) Len() int {
return len(a)
}
func (a byPVCSize) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a byPVCSize) Less(i, j int) bool {
iSize := a[i].pvc.Spec.Resources.Requests[v1.ResourceStorage]
jSize := a[j].pvc.Spec.Resources.Requests[v1.ResourceStorage]
// return true if iSize is less than jSize
return iSize.Cmp(jSize) == -1
}
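// Editorial sketch, not part of the original file: byPVCSize is used with
// sort.Sort in findMatchingVolumes so that the smallest request is matched
// first. Assuming "k8s.io/apimachinery/pkg/api/resource" were imported, the
// ordering could be exercised like this:
//
//	small := &v1.PersistentVolumeClaim{Spec: v1.PersistentVolumeClaimSpec{
//		Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
//			v1.ResourceStorage: resource.MustParse("1Gi"),
//		}},
//	}}
//	big := small.DeepCopy()
//	big.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse("10Gi")
//	claims := []*bindingInfo{{pvc: big}, {pvc: small}}
//	sort.Sort(byPVCSize(claims))
//	// claims[0].pvc now holds the 1Gi request, claims[1].pvc the 10Gi one.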

View File

@ -1,150 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"sync"
"k8s.io/api/core/v1"
)
// PodBindingCache stores PV binding decisions per pod per node.
// Pod entries are removed when the Pod is deleted or updated to
// no longer be schedulable.
type PodBindingCache interface {
// UpdateBindings will update the cache with the given bindings for the
// pod and node.
UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo)
// GetBindings will return the cached bindings for the given pod and node.
GetBindings(pod *v1.Pod, node string) []*bindingInfo
// UpdateProvisionedPVCs will update the cache with the given provisioning decisions
// for the pod and node.
UpdateProvisionedPVCs(pod *v1.Pod, node string, provisionings []*v1.PersistentVolumeClaim)
// GetProvisionedPVCs will return the cached provisioning decisions for the given pod and node.
GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim
// DeleteBindings will remove all cached bindings and provisionings for the given pod.
// TODO: separate the func if it is needed to delete bindings/provisionings individually
DeleteBindings(pod *v1.Pod)
}
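// Editorial usage sketch, not part of the original file: a minimal
// illustration of the cache lifecycle driven by the volume binder, using
// hypothetical pod and node names.
func examplePodBindingCacheSketch() {
	cache := NewPodBindingCache()
	pod := &v1.Pod{}
	pod.Name = "pod1"
	pod.Namespace = "ns"
	// FindPodVolumes records the per-node decisions.
	cache.UpdateBindings(pod, "node1", []*bindingInfo{})
	cache.UpdateProvisionedPVCs(pod, "node1", []*v1.PersistentVolumeClaim{})
	// AssumePodVolumes and BindPodVolumes later read them back.
	_ = cache.GetBindings(pod, "node1")
	_ = cache.GetProvisionedPVCs(pod, "node1")
	// Once the pod is deleted or no longer schedulable, the entry is dropped.
	cache.DeleteBindings(pod)
}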
type podBindingCache struct {
mutex sync.Mutex
// Key = pod name
// Value = nodeDecisions
bindingDecisions map[string]nodeDecisions
}
// Key = nodeName
// Value = bindings & provisioned PVCs of the node
type nodeDecisions map[string]nodeDecision
// A decision includes bindingInfo and provisioned PVCs of the node
type nodeDecision struct {
bindings []*bindingInfo
provisionings []*v1.PersistentVolumeClaim
}
func NewPodBindingCache() PodBindingCache {
return &podBindingCache{bindingDecisions: map[string]nodeDecisions{}}
}
func (c *podBindingCache) DeleteBindings(pod *v1.Pod) {
c.mutex.Lock()
defer c.mutex.Unlock()
podName := getPodName(pod)
delete(c.bindingDecisions, podName)
}
func (c *podBindingCache) UpdateBindings(pod *v1.Pod, node string, bindings []*bindingInfo) {
c.mutex.Lock()
defer c.mutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
if !ok {
decisions = nodeDecisions{}
c.bindingDecisions[podName] = decisions
}
decision, ok := decisions[node]
if !ok {
decision = nodeDecision{
bindings: bindings,
}
} else {
decision.bindings = bindings
}
decisions[node] = decision
}
func (c *podBindingCache) GetBindings(pod *v1.Pod, node string) []*bindingInfo {
c.mutex.Lock()
defer c.mutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
if !ok {
return nil
}
decision, ok := decisions[node]
if !ok {
return nil
}
return decision.bindings
}
func (c *podBindingCache) UpdateProvisionedPVCs(pod *v1.Pod, node string, pvcs []*v1.PersistentVolumeClaim) {
c.mutex.Lock()
defer c.mutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
if !ok {
decisions = nodeDecisions{}
c.bindingDecisions[podName] = decisions
}
decision, ok := decisions[node]
if !ok {
decision = nodeDecision{
provisionings: pvcs,
}
} else {
decision.provisionings = pvcs
}
decisions[node] = decision
}
func (c *podBindingCache) GetProvisionedPVCs(pod *v1.Pod, node string) []*v1.PersistentVolumeClaim {
c.mutex.Lock()
defer c.mutex.Unlock()
podName := getPodName(pod)
decisions, ok := c.bindingDecisions[podName]
if !ok {
return nil
}
decision, ok := decisions[node]
if !ok {
return nil
}
return decision.provisionings
}

View File

@ -1,144 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"reflect"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestUpdateGetBindings(t *testing.T) {
scenarios := map[string]struct {
updateBindings []*bindingInfo
updateProvisionings []*v1.PersistentVolumeClaim
updatePod string
updateNode string
getBindings []*bindingInfo
getProvisionings []*v1.PersistentVolumeClaim
getPod string
getNode string
}{
"no-pod": {
getPod: "pod1",
getNode: "node1",
},
"no-node": {
updatePod: "pod1",
updateNode: "node1",
updateBindings: []*bindingInfo{},
updateProvisionings: []*v1.PersistentVolumeClaim{},
getPod: "pod1",
getNode: "node2",
},
"binding-exists": {
updatePod: "pod1",
updateNode: "node1",
updateBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}},
updateProvisionings: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}},
getPod: "pod1",
getNode: "node1",
getBindings: []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}},
getProvisionings: []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}},
},
}
for name, scenario := range scenarios {
cache := NewPodBindingCache()
// Perform updates
updatePod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: scenario.updatePod, Namespace: "ns"}}
cache.UpdateBindings(updatePod, scenario.updateNode, scenario.updateBindings)
cache.UpdateProvisionedPVCs(updatePod, scenario.updateNode, scenario.updateProvisionings)
// Verify updated bindings
bindings := cache.GetBindings(updatePod, scenario.updateNode)
if !reflect.DeepEqual(bindings, scenario.updateBindings) {
t.Errorf("Test %v failed: returned bindings after update different. Got %+v, expected %+v", name, bindings, scenario.updateBindings)
}
// Verify updated provisionings
provisionings := cache.GetProvisionedPVCs(updatePod, scenario.updateNode)
if !reflect.DeepEqual(provisionings, scenario.updateProvisionings) {
t.Errorf("Test %v failed: returned provisionings after update different. Got %+v, expected %+v", name, provisionings, scenario.updateProvisionings)
}
// Get bindings
getPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: scenario.getPod, Namespace: "ns"}}
bindings = cache.GetBindings(getPod, scenario.getNode)
if !reflect.DeepEqual(bindings, scenario.getBindings) {
t.Errorf("Test %v failed: unexpected bindings returned. Got %+v, expected %+v", name, bindings, scenario.updateBindings)
}
// Get provisionings
provisionings = cache.GetProvisionedPVCs(getPod, scenario.getNode)
if !reflect.DeepEqual(provisionings, scenario.getProvisionings) {
t.Errorf("Test %v failed: unexpected bindings returned. Got %+v, expected %+v", name, provisionings, scenario.getProvisionings)
}
}
}
func TestDeleteBindings(t *testing.T) {
initialBindings := []*bindingInfo{{pvc: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "pvc1"}}}}
initialProvisionings := []*v1.PersistentVolumeClaim{{ObjectMeta: metav1.ObjectMeta{Name: "pvc2"}}}
cache := NewPodBindingCache()
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns"}}
// Get nil bindings and provisionings
bindings := cache.GetBindings(pod, "node1")
if bindings != nil {
t.Errorf("Test failed: expected initial nil bindings, got %+v", bindings)
}
provisionings := cache.GetProvisionedPVCs(pod, "node1")
if provisionings != nil {
t.Errorf("Test failed: expected initial nil provisionings, got %+v", provisionings)
}
// Delete nothing
cache.DeleteBindings(pod)
// Perform updates
cache.UpdateBindings(pod, "node1", initialBindings)
cache.UpdateProvisionedPVCs(pod, "node1", initialProvisionings)
// Get bindings and provisionings
bindings = cache.GetBindings(pod, "node1")
if !reflect.DeepEqual(bindings, initialBindings) {
t.Errorf("Test failed: expected bindings %+v, got %+v", initialBindings, bindings)
}
provisionings = cache.GetProvisionedPVCs(pod, "node1")
if !reflect.DeepEqual(provisionings, initialProvisionings) {
t.Errorf("Test failed: expected provisionings %+v, got %+v", initialProvisionings, provisionings)
}
// Delete
cache.DeleteBindings(pod)
// Get bindings and provisionings
bindings = cache.GetBindings(pod, "node1")
if bindings != nil {
t.Errorf("Test failed: expected nil bindings, got %+v", bindings)
}
provisionings = cache.GetProvisionedPVCs(pod, "node1")
if provisionings != nil {
t.Errorf("Test failed: expected nil provisionings, got %+v", provisionings)
}
}

View File

@ -1,63 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"k8s.io/api/core/v1"
)
type FakeVolumeBinderConfig struct {
AllBound bool
FindUnboundSatsified bool
FindBoundSatsified bool
FindErr error
AssumeBindingRequired bool
AssumeErr error
BindErr error
}
// NewFakeVolumeBinder sets up a fake volume binder for testing the scheduler's
// topology-aware volume binding decisions.
func NewFakeVolumeBinder(config *FakeVolumeBinderConfig) *FakeVolumeBinder {
return &FakeVolumeBinder{
config: config,
}
}
type FakeVolumeBinder struct {
config *FakeVolumeBinderConfig
AssumeCalled bool
BindCalled bool
}
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
}
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, bool, error) {
b.AssumeCalled = true
return b.config.AllBound, b.config.AssumeBindingRequired, b.config.AssumeErr
}
func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
b.BindCalled = true
return b.config.BindErr
}
func (b *FakeVolumeBinder) GetBindingsCache() PodBindingCache {
return nil
}
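// Editorial usage sketch, not part of the original file: scheduler unit tests
// can inject the fake to simulate binder outcomes without any API calls. The
// config values and names below are illustrative assumptions.
func exampleFakeVolumeBinderSketch() {
	binder := NewFakeVolumeBinder(&FakeVolumeBinderConfig{
		AllBound:             false,
		FindUnboundSatsified: true,
		FindBoundSatsified:   true,
	})
	pod := &v1.Pod{}
	pod.Name = "pod1"
	node := &v1.Node{}
	node.Name = "node1"
	_, _, _ = binder.FindPodVolumes(pod, node)
	_, _, _ = binder.AssumePodVolumes(pod, node.Name)
	_ = binder.BindPodVolumes(pod)
	// Tests can then assert on binder.AssumeCalled and binder.BindCalled.
}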

File diff suppressed because it is too large

View File

@ -1,130 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolume
import (
"fmt"
"net"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
vol "k8s.io/kubernetes/pkg/volume"
)
// VolumeHost interface implementation for PersistentVolumeController.
var _ vol.VolumeHost = &PersistentVolumeController{}
func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetVolumeDevicePluginDir(pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodsDir() string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return ""
}
func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
return ctrl.kubeClient
}
func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *v1.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}
func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}
func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface {
return ctrl.cloud
}
func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Interface {
return nil
}
func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
return nil
}
func (ctrl *PersistentVolumeController) GetHostName() string {
return ""
}
func (ctrl *PersistentVolumeController) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("PersistentVolumeController.GetHostIP() is not implemented")
}
func (ctrl *PersistentVolumeController) GetNodeAllocatable() (v1.ResourceList, error) {
return v1.ResourceList{}, nil
}
func (ctrl *PersistentVolumeController) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
return func(_, _ string) (*v1.Secret, error) {
return nil, fmt.Errorf("GetSecret unsupported in PersistentVolumeController")
}
}
func (ctrl *PersistentVolumeController) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return func(_, _ string) (*v1.ConfigMap, error) {
return nil, fmt.Errorf("GetConfigMap unsupported in PersistentVolumeController")
}
}
func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return nil, fmt.Errorf("GetServiceAccountToken unsupported in PersistentVolumeController")
}
}
func (ctrl *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
return mount.NewOsExec()
}
func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, error) {
return nil, fmt.Errorf("GetNodeLabels() unsupported in PersistentVolumeController")
}
func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
return ""
}
func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
return ctrl.eventRecorder
}