Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 18:43:34 +00:00)

Commit: vendor updates
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/BUILD (generated, vendored): 11 lines changed
@@ -24,6 +24,7 @@ go_library(
"//pkg/cloudprovider:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/volume/events:go_default_library",
"//pkg/controller/volume/persistentvolume/metrics:go_default_library",
"//pkg/features:go_default_library",
"//pkg/util/goroutinemap:go_default_library",
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
@@ -31,6 +32,7 @@ go_library(
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
@@ -41,6 +43,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
@@ -71,14 +74,14 @@ go_test(
"scheduler_binder_cache_test.go",
"scheduler_binder_test.go",
],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
@@ -95,6 +98,7 @@ go_test(
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/storage/v1:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
@@ -114,6 +118,7 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/controller/volume/persistentvolume/metrics:all-srcs",
"//pkg/controller/volume/persistentvolume/options:all-srcs",
],
tags = ["automanaged"],
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/binder_test.go (generated, vendored): 30 lines changed
@@ -198,6 +198,32 @@ func TestSync(t *testing.T) {
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, &classWait, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds pre-bound PVC only to the volume it points to,
// even if there is smaller volume available
"1-15 - successful prebound PVC",
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
[]*v1.PersistentVolume{
newVolume("volume1-15_1", "10Gi", "uid1-15", "claim1-15", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController),
newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
},
newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimPending, nil),
withExpectedCapacity("10Gi", newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimBound, nil, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind pre-bound PVC to PV with different AccessMode
"1-16 - successful prebound PVC",
// PV has ReadWriteOnce
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)),
noevents, noerrors, testSyncClaim,
},

// [Unit test set 2] User asked for a specific PV.
// Test the binding when pv.ClaimRef is already set by controller or
@@ -598,7 +624,7 @@ func TestSync(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: classWait},
VolumeBindingMode: &modeWait,
},
})
}, []*v1.Pod{})
}

func TestSyncAlphaBlockVolume(t *testing.T) {
@@ -750,7 +776,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) {
}
defer utilfeature.DefaultFeatureGate.Set("BlockVolume=false")

runSyncTests(t, tests, []*storage.StorageClass{})
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/delete_test.go (generated, vendored): 2 lines changed
@@ -192,7 +192,7 @@ func TestDeleteSync(t *testing.T) {
},
},
}
runSyncTests(t, tests, []*storage.StorageClass{})
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/framework_test.go (generated, vendored): 20 lines changed
@@ -41,6 +41,7 @@ import (
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
@@ -48,6 +49,7 @@ import (
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/controller"
vol "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
)

// This is a unit test framework for persistent volume controller.
@@ -609,6 +611,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
VolumeInformer: informerFactory.Core().V1().PersistentVolumes(),
ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
ClassInformer: informerFactory.Storage().V1().StorageClasses(),
PodInformer: informerFactory.Core().V1().Pods(),
EventRecorder: record.NewFakeRecorder(1000),
EnableDynamicProvisioning: enableDynamicProvisioning,
}
@@ -802,6 +805,13 @@ func claimWithAnnotation(name, value string, claims []*v1.PersistentVolumeClaim)
return claims
}

// claimWithAccessMode saves given access into given claims.
// Meant to be used to compose claims specified inline in a test.
func claimWithAccessMode(modes []v1.PersistentVolumeAccessMode, claims []*v1.PersistentVolumeClaim) []*v1.PersistentVolumeClaim {
claims[0].Spec.AccessModes = modes
return claims
}

func testSyncClaim(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
return ctrl.syncClaim(test.initialClaims[0])
}
@@ -932,7 +942,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *volumeReacto
// 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass) {
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
for _, test := range tests {
glog.V(4).Infof("starting test %q", test.name)

@@ -959,6 +969,12 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
}
ctrl.classLister = storagelisters.NewStorageClassLister(indexer)

podIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
for _, pod := range pods {
podIndexer.Add(pod)
}
ctrl.podLister = corelisters.NewPodLister(podIndexer)

// Run the tested functions
err = test.test(ctrl, reactor, test)
if err != nil {
@@ -1247,7 +1263,7 @@ func (plugin *mockVolumePlugin) GetMetrics() (*vol.Metrics, error) {

// Recycler interfaces

func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder vol.RecycleEventRecorder) error {
func (plugin *mockVolumePlugin) Recycle(pvName string, spec *vol.Spec, eventRecorder recyclerclient.RecycleEventRecorder) error {
if len(plugin.recycleCalls) == 0 {
return fmt.Errorf("Mock plugin error: no recycleCalls configured")
}
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index.go (generated, vendored): 10 lines changed
@@ -28,7 +28,6 @@ import (
"k8s.io/client-go/tools/cache"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

@@ -169,6 +168,13 @@ func findMatchingVolume(
continue
}

// check if PV's DeletionTimeStamp is set, if so, skip this volume.
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
if volume.ObjectMeta.DeletionTimestamp != nil {
continue
}
}

nodeAffinityValid := true
if node != nil {
// Scheduler path, check that the PV NodeAffinity
@@ -314,7 +320,7 @@ func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requ
keys := pvIndex.store.ListIndexFuncValues("accessmodes")
for _, key := range keys {
indexedModes := v1helper.GetAccessModesFromString(key)
if volume.AccessModesContainedInAll(indexedModes, requestedModes) {
if volumeutil.AccessModesContainedInAll(indexedModes, requestedModes) {
matchedModes = append(matchedModes, indexedModes)
}
}
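Aside, not part of the diff: the last hunk above swaps volume.AccessModesContainedInAll for volumeutil.AccessModesContainedInAll (the helper moved packages in this update). As a reading aid, here is a minimal sketch of the containment semantics that helper provides, with a small usage example; the function body below is an illustration, not the vendored implementation.

```go
// Illustration of the containment check used above: every requested access
// mode must appear in the indexed set. Not the vendored implementation.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func accessModesContainedInAll(indexedModes, requestedModes []v1.PersistentVolumeAccessMode) bool {
	for _, requested := range requestedModes {
		found := false
		for _, indexed := range indexedModes {
			if indexed == requested {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func main() {
	indexed := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}
	// true: RWO is in the indexed set.
	fmt.Println(accessModesContainedInAll(indexed, []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}))
	// false: RWX is not in the indexed set.
	fmt.Println(accessModesContainedInAll(indexed, []v1.PersistentVolumeAccessMode{v1.ReadWriteMany, v1.ReadOnlyMany}))
}
```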
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/index_test.go (generated, vendored): 199 lines changed
@ -20,8 +20,6 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@ -29,8 +27,7 @@ import (
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
ref "k8s.io/client-go/tools/reference"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim {
|
||||
@ -307,7 +304,7 @@ func TestAllPossibleAccessModes(t *testing.T) {
|
||||
t.Errorf("Expected 3 arrays of modes that match RWO, but got %v", len(possibleModes))
|
||||
}
|
||||
for _, m := range possibleModes {
|
||||
if !volume.AccessModesContains(m, v1.ReadWriteOnce) {
|
||||
if !util.AccessModesContains(m, v1.ReadWriteOnce) {
|
||||
t.Errorf("AccessModes does not contain %s", v1.ReadWriteOnce)
|
||||
}
|
||||
}
|
||||
@ -316,7 +313,7 @@ func TestAllPossibleAccessModes(t *testing.T) {
|
||||
if len(possibleModes) != 1 {
|
||||
t.Errorf("Expected 1 array of modes that match RWX, but got %v", len(possibleModes))
|
||||
}
|
||||
if !volume.AccessModesContains(possibleModes[0], v1.ReadWriteMany) {
|
||||
if !util.AccessModesContains(possibleModes[0], v1.ReadWriteMany) {
|
||||
t.Errorf("AccessModes does not contain %s", v1.ReadWriteOnce)
|
||||
}
|
||||
|
||||
@ -680,9 +677,8 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv",
|
||||
Name: "affinity001",
|
||||
Annotations: getAnnotationWithNodeAffinity("key1", "value1"),
|
||||
UID: "affinity-pv",
|
||||
Name: "affinity001",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
@ -696,13 +692,13 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv2",
|
||||
Name: "affinity002",
|
||||
Annotations: getAnnotationWithNodeAffinity("key1", "value1"),
|
||||
UID: "affinity-pv2",
|
||||
Name: "affinity002",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
@ -716,13 +712,13 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-prebound",
|
||||
Name: "affinity003",
|
||||
Annotations: getAnnotationWithNodeAffinity("key1", "value1"),
|
||||
UID: "affinity-prebound",
|
||||
Name: "affinity003",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
@ -737,13 +733,13 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
ClaimRef: &v1.ObjectReference{Name: "claim02", Namespace: "myns"},
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "affinity-pv3",
|
||||
Name: "affinity003",
|
||||
Annotations: getAnnotationWithNodeAffinity("key1", "value3"),
|
||||
UID: "affinity-pv3",
|
||||
Name: "affinity003",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
@ -757,6 +753,7 @@ func createTestVolumes() []*v1.PersistentVolume {
|
||||
v1.ReadOnlyMany,
|
||||
},
|
||||
StorageClassName: classWait,
|
||||
NodeAffinity: getVolumeNodeAffinity("key1", "value3"),
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -776,9 +773,9 @@ func testVolume(name, size string) *v1.PersistentVolume {
|
||||
}
|
||||
}
|
||||
|
||||
func getAnnotationWithNodeAffinity(key string, value string) map[string]string {
|
||||
affinity := &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
func getVolumeNodeAffinity(key string, value string) *v1.VolumeNodeAffinity {
|
||||
return &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
@ -792,14 +789,6 @@ func getAnnotationWithNodeAffinity(key string, value string) map[string]string {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
annotations := map[string]string{}
|
||||
err := helper.StorageNodeAffinityToAlphaAnnotation(annotations, affinity)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to get node affinity annotation: %v", err)
|
||||
}
|
||||
|
||||
return annotations
|
||||
}
|
||||
|
||||
func createVolumeModeBlockTestVolume() *v1.PersistentVolume {
|
||||
@ -854,18 +843,22 @@ func createTestVolOrderedIndex(pv *v1.PersistentVolume) persistentVolumeOrderedI
|
||||
return volFile
|
||||
}
|
||||
|
||||
func toggleBlockVolumeFeature(toggleFlag bool, t *testing.T) {
|
||||
func toggleFeature(toggleFlag bool, featureName string, t *testing.T) {
|
||||
var valueStr string
|
||||
if toggleFlag {
|
||||
// Enable alpha feature BlockVolume
|
||||
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
|
||||
// Enable feature
|
||||
valueStr = featureName + "=true"
|
||||
err := utilfeature.DefaultFeatureGate.Set(valueStr)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to enable feature gate for BlockVolume: %v", err)
|
||||
t.Errorf("Failed to enable feature gate for %s: %v", featureName, err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
|
||||
// Disable feature
|
||||
valueStr = featureName + "=false"
|
||||
err := utilfeature.DefaultFeatureGate.Set(valueStr)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to disable feature gate for BlockVolume: %v", err)
|
||||
t.Errorf("Failed to disable feature gate for %s: %v", featureName, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -935,7 +928,7 @@ func TestAlphaVolumeModeCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
toggleBlockVolumeFeature(scenario.enableBlock, t)
|
||||
toggleFeature(scenario.enableBlock, "BlockVolume", t)
|
||||
expectedMisMatch, err := checkVolumeModeMisMatches(&scenario.pvc.Spec, &scenario.vol.Spec)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected failure for checkVolumeModeMisMatches: %v", err)
|
||||
@ -950,7 +943,7 @@ func TestAlphaVolumeModeCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
// make sure feature gate is turned off
|
||||
toggleBlockVolumeFeature(false, t)
|
||||
toggleFeature(false, "BlockVolume", t)
|
||||
}
|
||||
|
||||
func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
@ -1028,7 +1021,7 @@ func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
toggleBlockVolumeFeature(scenario.enableBlock, t)
|
||||
toggleFeature(scenario.enableBlock, "BlockVolume", t)
|
||||
pvmatch, err := scenario.vol.findBestMatchForClaim(scenario.pvc, false)
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if pvmatch == nil && scenario.isExpectedMatch {
|
||||
@ -1047,7 +1040,135 @@ func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
}
|
||||
|
||||
// make sure feature gate is turned off
|
||||
toggleBlockVolumeFeature(false, t)
|
||||
toggleFeature(false, "BlockVolume", t)
|
||||
}
|
||||
|
||||
func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) {
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pv1",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
},
|
||||
}
|
||||
|
||||
pvToDelete := pv.DeepCopy()
|
||||
now := metav1.Now()
|
||||
pvToDelete.ObjectMeta.DeletionTimestamp = &now
|
||||
|
||||
pvc := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pvc1",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1G")}},
|
||||
},
|
||||
}
|
||||
|
||||
satisfyingTestCases := map[string]struct {
|
||||
isExpectedMatch bool
|
||||
vol *v1.PersistentVolume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
enableStorageObjectInUseProtection bool
|
||||
}{
|
||||
"feature enabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: pv,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature enabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: false,
|
||||
vol: pvToDelete,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: pv,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: true,
|
||||
vol: pvToDelete,
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, testCase := range satisfyingTestCases {
|
||||
toggleFeature(testCase.enableStorageObjectInUseProtection, "StorageObjectInUseProtection", t)
|
||||
err := checkVolumeSatisfyClaim(testCase.vol, testCase.pvc)
|
||||
// expected to match but got an error
|
||||
if err != nil && testCase.isExpectedMatch {
|
||||
t.Errorf("%s: expected to match but got an error: %v", name, err)
|
||||
}
|
||||
// not expected to match but did
|
||||
if err == nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("%s: not expected to match but did", name)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
filteringTestCases := map[string]struct {
|
||||
isExpectedMatch bool
|
||||
vol persistentVolumeOrderedIndex
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
enableStorageObjectInUseProtection bool
|
||||
}{
|
||||
"feature enabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pv),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature enabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(pvToDelete),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: true,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp not set": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pv),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
"feature disabled - pv deletionTimeStamp set": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(pvToDelete),
|
||||
pvc: pvc,
|
||||
enableStorageObjectInUseProtection: false,
|
||||
},
|
||||
}
|
||||
for name, testCase := range filteringTestCases {
|
||||
toggleFeature(testCase.enableStorageObjectInUseProtection, "StorageObjectInUseProtection", t)
|
||||
pvmatch, err := testCase.vol.findBestMatchForClaim(testCase.pvc, false)
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if pvmatch == nil && testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase, no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
|
||||
}
|
||||
// expected to not match but either got an error or a returned pvmatch
|
||||
if pvmatch != nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase, expected no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && !testCase.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for testcase: %s - %+v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// make sure feature gate is turned off
|
||||
toggleFeature(false, "StorageObjectInUseProtection", t)
|
||||
}
|
||||
|
||||
func TestFindingPreboundVolumes(t *testing.T) {
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/BUILD (generated, vendored, new file): 30 lines added
@@ -0,0 +1,30 @@
package(default_visibility = ["//visibility:public"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)

go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go (generated, vendored, new file): 184 lines added
@ -0,0 +1,184 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
// Subsystem names.
|
||||
pvControllerSubsystem = "pv_collector"
|
||||
|
||||
// Metric names.
|
||||
boundPVKey = "bound_pv_count"
|
||||
unboundPVKey = "unbound_pv_count"
|
||||
boundPVCKey = "bound_pvc_count"
|
||||
unboundPVCKey = "unbound_pvc_count"
|
||||
|
||||
// Label names.
|
||||
namespaceLabel = "namespace"
|
||||
storageClassLabel = "storage_class"
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
|
||||
// PVLister used to list persistent volumes.
|
||||
type PVLister interface {
|
||||
List() []interface{}
|
||||
}
|
||||
|
||||
// PVCLister used to list persistent volume claims.
|
||||
type PVCLister interface {
|
||||
List() []interface{}
|
||||
}
|
||||
|
||||
// Register all metrics for pv controller.
|
||||
func Register(pvLister PVLister, pvcLister PVCLister) {
|
||||
registerMetrics.Do(func() {
|
||||
prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
|
||||
})
|
||||
}
|
||||
|
||||
func newPVAndPVCCountCollector(pvLister PVLister, pvcLister PVCLister) *pvAndPVCCountCollector {
|
||||
return &pvAndPVCCountCollector{pvLister, pvcLister}
|
||||
}
|
||||
|
||||
// Custom collector for current pod and container counts.
|
||||
type pvAndPVCCountCollector struct {
|
||||
// Cache for accessing information about PersistentVolumes.
|
||||
pvLister PVLister
|
||||
// Cache for accessing information about PersistentVolumeClaims.
|
||||
pvcLister PVCLister
|
||||
}
|
||||
|
||||
var (
|
||||
boundPVCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, boundPVKey),
|
||||
"Gauge measuring number of persistent volume currently bound",
|
||||
[]string{storageClassLabel}, nil)
|
||||
unboundPVCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVKey),
|
||||
"Gauge measuring number of persistent volume currently unbound",
|
||||
[]string{storageClassLabel}, nil)
|
||||
|
||||
boundPVCCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, boundPVCKey),
|
||||
"Gauge measuring number of persistent volume claim currently bound",
|
||||
[]string{namespaceLabel}, nil)
|
||||
unboundPVCCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
|
||||
"Gauge measuring number of persistent volume claim currently unbound",
|
||||
[]string{namespaceLabel}, nil)
|
||||
)
|
||||
|
||||
func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- boundPVCountDesc
|
||||
ch <- unboundPVCountDesc
|
||||
ch <- boundPVCCountDesc
|
||||
ch <- unboundPVCCountDesc
|
||||
}
|
||||
|
||||
func (collector *pvAndPVCCountCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
collector.pvCollect(ch)
|
||||
collector.pvcCollect(ch)
|
||||
}
|
||||
|
||||
func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric) {
|
||||
boundNumberByStorageClass := make(map[string]int)
|
||||
unboundNumberByStorageClass := make(map[string]int)
|
||||
for _, pvObj := range collector.pvLister.List() {
|
||||
pv, ok := pvObj.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if pv.Status.Phase == v1.VolumeBound {
|
||||
boundNumberByStorageClass[pv.Spec.StorageClassName]++
|
||||
} else {
|
||||
unboundNumberByStorageClass[pv.Spec.StorageClassName]++
|
||||
}
|
||||
}
|
||||
for storageClassName, number := range boundNumberByStorageClass {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
boundPVCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
storageClassName)
|
||||
if err != nil {
|
||||
glog.Warningf("Create bound pv number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
for storageClassName, number := range unboundNumberByStorageClass {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
unboundPVCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
storageClassName)
|
||||
if err != nil {
|
||||
glog.Warningf("Create unbound pv number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
}
|
||||
|
||||
func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric) {
|
||||
boundNumberByNamespace := make(map[string]int)
|
||||
unboundNumberByNamespace := make(map[string]int)
|
||||
for _, pvcObj := range collector.pvcLister.List() {
|
||||
pvc, ok := pvcObj.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if pvc.Status.Phase == v1.ClaimBound {
|
||||
boundNumberByNamespace[pvc.Namespace]++
|
||||
} else {
|
||||
unboundNumberByNamespace[pvc.Namespace]++
|
||||
}
|
||||
}
|
||||
for namespace, number := range boundNumberByNamespace {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
boundPVCCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
namespace)
|
||||
if err != nil {
|
||||
glog.Warningf("Create bound pvc number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
for namespace, number := range unboundNumberByNamespace {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
unboundPVCCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
namespace)
|
||||
if err != nil {
|
||||
glog.Warningf("Create unbound pvc number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
}
|
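Aside, not part of the diff: metrics.go above implements a custom Prometheus collector (Describe/Collect emitting const gauge metrics built from lister snapshots). Below is a self-contained sketch of that same pattern, registered against a private registry so it can be run or tested in isolation. The fake lister, the string stand-ins for PV objects, and the single metric produced here are illustrative assumptions, not the vendored code.

```go
// Sketch of the custom-collector pattern used by pvAndPVCCountCollector above.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// fakePVLister satisfies the same shape as the PVLister interface: List() []interface{}.
type fakePVLister struct{ objs []interface{} }

func (l fakePVLister) List() []interface{} { return l.objs }

// boundPVCollector counts objects per storage class and exposes the counts as
// a const gauge, mirroring the pv_collector_bound_pv_count metric above.
type boundPVCollector struct {
	lister fakePVLister
	desc   *prometheus.Desc
}

func (c *boundPVCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *boundPVCollector) Collect(ch chan<- prometheus.Metric) {
	// The "objects" here are plain strings standing in for *v1.PersistentVolume,
	// to keep the sketch dependency-free.
	perClass := map[string]int{}
	for _, obj := range c.lister.List() {
		if class, ok := obj.(string); ok {
			perClass[class]++
		}
	}
	for class, n := range perClass {
		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), class)
	}
}

func main() {
	c := &boundPVCollector{
		lister: fakePVLister{objs: []interface{}{"standard", "standard", "fast"}},
		desc: prometheus.NewDesc(
			prometheus.BuildFQName("", "pv_collector", "bound_pv_count"),
			"Gauge measuring number of persistent volumes currently bound",
			[]string{"storage_class"}, nil),
	}

	// A private registry keeps the example independent of the default registry.
	reg := prometheus.NewRegistry()
	reg.MustRegister(c)

	families, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range families {
		for _, m := range mf.GetMetric() {
			fmt.Println(mf.GetName(), m.GetLabel()[0].GetValue(), m.GetGauge().GetValue())
		}
	}
}
```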
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/provision_test.go (generated, vendored): 4 lines changed
@@ -172,7 +172,7 @@ func TestProvisionSync(t *testing.T) {
newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted),
noevents, noerrors,
// No provisioning plugin confingure - makes the test fail when
// the controller errorneously tries to provision something
// the controller erroneously tries to provision something
wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
},
{
@@ -416,7 +416,7 @@ func TestProvisionSync(t *testing.T) {
noerrors, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
}
runSyncTests(t, tests, storageClasses)
runSyncTests(t, tests, storageClasses, []*v1.Pod{})
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go (generated, vendored): 84 lines changed
@ -26,6 +26,8 @@ import (
|
||||
storage "k8s.io/api/storage/v1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
@ -43,6 +45,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
@ -161,6 +164,8 @@ type PersistentVolumeController struct {
|
||||
claimListerSynced cache.InformerSynced
|
||||
classLister storagelisters.StorageClassLister
|
||||
classListerSynced cache.InformerSynced
|
||||
podLister corelisters.PodLister
|
||||
podListerSynced cache.InformerSynced
|
||||
|
||||
kubeClient clientset.Interface
|
||||
eventRecorder record.EventRecorder
|
||||
@ -233,24 +238,35 @@ func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClai
|
||||
func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) error {
|
||||
requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
requestedSize := requestedQty.Value()
|
||||
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
|
||||
|
||||
// check if PV's DeletionTimeStamp is set, if so, return error.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
|
||||
if volume.ObjectMeta.DeletionTimestamp != nil {
|
||||
return fmt.Errorf("the volume is marked for deletion")
|
||||
}
|
||||
}
|
||||
|
||||
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
|
||||
volumeSize := volumeQty.Value()
|
||||
if volumeSize < requestedSize {
|
||||
return fmt.Errorf("Storage capacity of volume[%s] requested by claim[%v] is not enough", volume.Name, claimToClaimKey(claim))
|
||||
return fmt.Errorf("requested PV is too small")
|
||||
}
|
||||
|
||||
requestedClass := v1helper.GetPersistentVolumeClaimClass(claim)
|
||||
if v1helper.GetPersistentVolumeClass(volume) != requestedClass {
|
||||
return fmt.Errorf("Class of volume[%s] is not the same as claim[%v]", volume.Name, claimToClaimKey(claim))
|
||||
return fmt.Errorf("storageClasseName does not match")
|
||||
}
|
||||
|
||||
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking volumeMode: %v", err)
|
||||
}
|
||||
if isMisMatch {
|
||||
return fmt.Errorf("VolumeMode[%v] of volume[%s] is incompatible with VolumeMode[%v] of claim[%v]", volume.Spec.VolumeMode, volume.Name, claim.Spec.VolumeMode, claim.Name)
|
||||
return fmt.Errorf("incompatible volumeMode")
|
||||
}
|
||||
|
||||
if !checkAccessModes(claim, volume) {
|
||||
return fmt.Errorf("incompatible accessMode")
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -362,8 +378,9 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(claim *v1.PersistentVol
|
||||
glog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim))
|
||||
if err = checkVolumeSatisfyClaim(volume, claim); err != nil {
|
||||
glog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err)
|
||||
//send a event
|
||||
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, "Volume's size is smaller than requested or volume's class does not match with claim")
|
||||
//send an event
|
||||
msg := fmt.Sprintf("Cannot bind to requested volume %q: %s", volume.Name, err)
|
||||
ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, msg)
|
||||
//volume does not satisfy the requirements of the claim
|
||||
if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil {
|
||||
return err
|
||||
@ -802,7 +819,7 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV
|
||||
// API server. The claim is not modified in this method!
|
||||
func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, claim *v1.PersistentVolumeClaim, updateCache bool) (*v1.PersistentVolume, error) {
|
||||
glog.V(2).Infof("claim %q bound to volume %q", claimToClaimKey(claim), volumeClone.Name)
|
||||
newVol, err := ctrl.kubeClient.Core().PersistentVolumes().Update(volumeClone)
|
||||
newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(volumeClone)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimToClaimKey(claim), err)
|
||||
return newVol, err
|
||||
@ -961,7 +978,7 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim
|
||||
func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume) error {
|
||||
glog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef))
|
||||
|
||||
// Save the PV only when any modification is neccessary.
|
||||
// Save the PV only when any modification is necessary.
|
||||
volumeClone := volume.DeepCopy()
|
||||
|
||||
if metav1.HasAnnotation(volume.ObjectMeta, annBoundByController) {
|
||||
@ -1041,7 +1058,7 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
|
||||
// so read current volume state now.
|
||||
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
|
||||
glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
|
||||
return
|
||||
}
|
||||
needsReclaim, err := ctrl.isVolumeReleased(newVolume)
|
||||
@ -1053,6 +1070,17 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(arg interface{})
|
||||
glog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name)
|
||||
return
|
||||
}
|
||||
pods, used, err := ctrl.isVolumeUsed(newVolume)
|
||||
if err != nil {
|
||||
glog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err)
|
||||
return
|
||||
}
|
||||
if used {
|
||||
msg := fmt.Sprintf("Volume is used by pods: %s", strings.Join(pods, ","))
|
||||
glog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg)
|
||||
ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeFailedRecycle, msg)
|
||||
return
|
||||
}
|
||||
|
||||
// Use the newest volume copy, this will save us from version conflicts on
|
||||
// saving.
|
||||
@ -1120,7 +1148,7 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(arg interface{}) e
|
||||
// read current volume state now.
|
||||
newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.V(3).Infof("error reading peristent volume %q: %v", volume.Name, err)
|
||||
glog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err)
|
||||
return nil
|
||||
}
|
||||
needsReclaim, err := ctrl.isVolumeReleased(newVolume)
|
||||
@ -1221,6 +1249,32 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// isVolumeUsed returns list of pods that use given PV.
|
||||
func (ctrl *PersistentVolumeController) isVolumeUsed(pv *v1.PersistentVolume) ([]string, bool, error) {
|
||||
if pv.Spec.ClaimRef == nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
claimName := pv.Spec.ClaimRef.Name
|
||||
|
||||
podNames := sets.NewString()
|
||||
pods, err := ctrl.podLister.Pods(pv.Spec.ClaimRef.Namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("error listing pods: %s", err)
|
||||
}
|
||||
for _, pod := range pods {
|
||||
if util.IsPodTerminated(pod, pod.Status) {
|
||||
continue
|
||||
}
|
||||
for i := range pod.Spec.Volumes {
|
||||
usedPV := &pod.Spec.Volumes[i]
|
||||
if usedPV.PersistentVolumeClaim != nil && usedPV.PersistentVolumeClaim.ClaimName == claimName {
|
||||
podNames.Insert(pod.Namespace + "/" + pod.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return podNames.List(), podNames.Len() != 0, nil
|
||||
}
|
||||
|
||||
// doDeleteVolume finds appropriate delete plugin and deletes given volume. It
|
||||
// returns 'true', when the volume was deleted and 'false' when the volume
|
||||
// cannot be deleted because of the deleter is external. No error should be
|
||||
@ -1250,7 +1304,7 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu
|
||||
|
||||
opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_delete")
|
||||
err = deleter.Delete()
|
||||
opComplete(err)
|
||||
opComplete(&err)
|
||||
if err != nil {
|
||||
// Deleter failed
|
||||
return false, err
|
||||
@ -1373,7 +1427,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claimObj interfa
|
||||
|
||||
opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
|
||||
volume, err = provisioner.Provision()
|
||||
opComplete(err)
|
||||
opComplete(&err)
|
||||
if err != nil {
|
||||
strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err)
|
||||
glog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err)
|
||||
@ -1496,7 +1550,7 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string,
|
||||
|
||||
// newRecyclerEventRecorder returns a RecycleEventRecorder that sends all events
|
||||
// to given volume.
|
||||
func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.PersistentVolume) vol.RecycleEventRecorder {
|
||||
func (ctrl *PersistentVolumeController) newRecyclerEventRecorder(volume *v1.PersistentVolume) recyclerclient.RecycleEventRecorder {
|
||||
return func(eventtype, message string) {
|
||||
ctrl.eventRecorder.Eventf(volume, eventtype, events.RecyclerPod, "Recycler pod: %s", message)
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go (generated, vendored): 10 lines changed
@@ -40,6 +40,7 @@ import (
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
"k8s.io/kubernetes/pkg/util/goroutinemap"
vol "k8s.io/kubernetes/pkg/volume"

@@ -61,6 +62,7 @@ type ControllerParameters struct {
VolumeInformer coreinformers.PersistentVolumeInformer
ClaimInformer coreinformers.PersistentVolumeClaimInformer
ClassInformer storageinformers.StorageClassInformer
PodInformer coreinformers.PodInformer
EventRecorder record.EventRecorder
EnableDynamicProvisioning bool
}
@@ -118,6 +120,8 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)

controller.classLister = p.ClassInformer.Lister()
controller.classListerSynced = p.ClassInformer.Informer().HasSynced
controller.podLister = p.PodInformer.Lister()
controller.podListerSynced = p.PodInformer.Informer().HasSynced
return controller, nil
}

@@ -262,9 +266,9 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
defer ctrl.volumeQueue.ShutDown()

glog.Infof("Starting persistent volume controller")
defer glog.Infof("Shutting down peristent volume controller")
defer glog.Infof("Shutting down persistent volume controller")

if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced) {
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced) {
return
}

@@ -274,6 +278,8 @@ func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
go wait.Until(ctrl.volumeWorker, time.Second, stopCh)
go wait.Until(ctrl.claimWorker, time.Second, stopCh)

metrics.Register(ctrl.volumes.store, ctrl.claims)

<-stopCh
}
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/recycle_test.go (generated, vendored): 71 lines changed
@ -22,6 +22,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// Test single call to syncVolume, expecting recycling to happen.
|
||||
@ -29,6 +30,44 @@ import (
|
||||
// 2. Call the syncVolume *once*.
|
||||
// 3. Compare resulting volumes with expected volumes.
|
||||
func TestRecycleSync(t *testing.T) {
|
||||
runningPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "runningPod",
|
||||
Namespace: testNamespace,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "vol1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "runningClaim",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
},
|
||||
}
|
||||
|
||||
pendingPod := runningPod.DeepCopy()
|
||||
pendingPod.Name = "pendingPod"
|
||||
pendingPod.Status.Phase = v1.PodPending
|
||||
pendingPod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = "pendingClaim"
|
||||
|
||||
completedPod := runningPod.DeepCopy()
|
||||
completedPod.Name = "completedPod"
|
||||
completedPod.Status.Phase = v1.PodSucceeded
|
||||
completedPod.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = "completedClaim"
|
||||
|
||||
pods := []*v1.Pod{
|
||||
runningPod,
|
||||
pendingPod,
|
||||
completedPod,
|
||||
}
|
||||
|
||||
tests := []controllerTest{
|
||||
{
|
||||
// recycle volume bound by controller
|
||||
@ -160,8 +199,38 @@ func TestRecycleSync(t *testing.T) {
|
||||
noclaims,
|
||||
[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
|
||||
},
|
||||
{
|
||||
// volume is used by a running pod - failure expected
|
||||
"6-11 - used by running pod",
|
||||
newVolumeArray("volume6-11", "1Gi", "uid6-11", "runningClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
|
||||
newVolumeArray("volume6-11", "1Gi", "uid6-11", "runningClaim", v1.VolumeReleased, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
|
||||
noclaims,
|
||||
noclaims,
|
||||
[]string{"Normal VolumeFailedRecycle"}, noerrors, testSyncVolume,
|
||||
},
|
||||
{
|
||||
// volume is used by a pending pod - failure expected
|
||||
"6-12 - used by pending pod",
|
||||
newVolumeArray("volume6-12", "1Gi", "uid6-12", "pendingClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
|
||||
newVolumeArray("volume6-12", "1Gi", "uid6-12", "pendingClaim", v1.VolumeReleased, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
|
||||
noclaims,
|
||||
noclaims,
|
||||
[]string{"Normal VolumeFailedRecycle"}, noerrors, testSyncVolume,
|
||||
},
|
||||
{
|
||||
// volume is used by a completed pod - recycle succeeds
|
||||
"6-13 - used by completed pod",
|
||||
newVolumeArray("volume6-13", "1Gi", "uid6-13", "completedClaim", v1.VolumeBound, v1.PersistentVolumeReclaimRecycle, classEmpty, annBoundByController),
|
||||
newVolumeArray("volume6-13", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRecycle, classEmpty),
|
||||
noclaims,
|
||||
noclaims,
|
||||
noevents, noerrors,
|
||||
// Inject recycler into the controller and call syncVolume. The
|
||||
// recycler simulates one recycle() call that succeeds.
|
||||
wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
|
||||
},
|
||||
}
|
||||
runSyncTests(t, tests, []*storage.StorageClass{})
|
||||
runSyncTests(t, tests, []*storage.StorageClass{}, pods)
|
||||
}
|
||||
|
||||
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
|
||||
|
@ -43,7 +43,7 @@ type AssumeCache interface {
|
||||
Get(objName string) (interface{}, error)
|
||||
|
||||
// List all the objects in the cache
|
||||
List() []interface{}
|
||||
List(indexObj interface{}) []interface{}
|
||||
}
|
||||
|
||||
type errWrongType struct {
|
||||
@ -89,7 +89,11 @@ type assumeCache struct {
|
||||
description string
|
||||
|
||||
// Stores objInfo pointers
|
||||
store cache.Store
|
||||
store cache.Indexer
|
||||
|
||||
// Index function for object
|
||||
indexFunc cache.IndexFunc
|
||||
indexName string
|
||||
}
|
||||
|
||||
type objInfo struct {
|
||||
@ -111,9 +115,21 @@ func objInfoKeyFunc(obj interface{}) (string, error) {
|
||||
return objInfo.name, nil
|
||||
}
|
||||
|
||||
func NewAssumeCache(informer cache.SharedIndexInformer, description string) *assumeCache {
|
||||
// TODO: index by storageclass
|
||||
c := &assumeCache{store: cache.NewStore(objInfoKeyFunc), description: description}
|
||||
func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
|
||||
objInfo, ok := obj.(*objInfo)
|
||||
if !ok {
|
||||
return []string{""}, &errWrongType{"objInfo", obj}
|
||||
}
|
||||
return c.indexFunc(objInfo.latestObj)
|
||||
}
|
||||
|
||||
func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) *assumeCache {
|
||||
c := &assumeCache{
|
||||
description: description,
|
||||
indexFunc: indexFunc,
|
||||
indexName: indexName,
|
||||
}
|
||||
c.store = cache.NewIndexer(objInfoKeyFunc, cache.Indexers{indexName: c.objInfoIndexFunc})
|
||||
|
||||
// Unit tests don't use informers
|
||||
if informer != nil {
|
||||
@ -211,12 +227,18 @@ func (c *assumeCache) Get(objName string) (interface{}, error) {
|
||||
return objInfo.latestObj, nil
|
||||
}
|
||||
|
||||
func (c *assumeCache) List() []interface{} {
|
||||
func (c *assumeCache) List(indexObj interface{}) []interface{} {
|
||||
c.mutex.Lock()
|
||||
defer c.mutex.Unlock()
|
||||
|
||||
allObjs := []interface{}{}
|
||||
for _, obj := range c.store.List() {
|
||||
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
|
||||
if err != nil {
|
||||
glog.Errorf("list index error: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, obj := range objs {
|
||||
objInfo, ok := obj.(*objInfo)
|
||||
if !ok {
|
||||
glog.Errorf("list error: %v", &errWrongType{"objInfo", obj})
|
||||
@ -280,15 +302,22 @@ type PVAssumeCache interface {
|
||||
AssumeCache
|
||||
|
||||
GetPV(pvName string) (*v1.PersistentVolume, error)
|
||||
ListPVs() []*v1.PersistentVolume
|
||||
ListPVs(storageClassName string) []*v1.PersistentVolume
|
||||
}
|
||||
|
||||
type pvAssumeCache struct {
|
||||
*assumeCache
|
||||
}
|
||||
|
||||
func pvStorageClassIndexFunc(obj interface{}) ([]string, error) {
|
||||
if pv, ok := obj.(*v1.PersistentVolume); ok {
|
||||
return []string{pv.Spec.StorageClassName}, nil
|
||||
}
|
||||
return []string{""}, fmt.Errorf("object is not a v1.PersistentVolume: %v", obj)
|
||||
}
|
||||
|
||||
func NewPVAssumeCache(informer cache.SharedIndexInformer) PVAssumeCache {
|
||||
return &pvAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolume")}
|
||||
return &pvAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc)}
|
||||
}
|
||||
|
||||
func (c *pvAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) {
|
||||
@ -304,8 +333,12 @@ func (c *pvAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) {
|
||||
return pv, nil
|
||||
}
|
||||
|
||||
func (c *pvAssumeCache) ListPVs() []*v1.PersistentVolume {
|
||||
objs := c.List()
|
||||
func (c *pvAssumeCache) ListPVs(storageClassName string) []*v1.PersistentVolume {
|
||||
objs := c.List(&v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
StorageClassName: storageClassName,
|
||||
},
|
||||
})
|
||||
pvs := []*v1.PersistentVolume{}
|
||||
for _, obj := range objs {
|
||||
pv, ok := obj.(*v1.PersistentVolume)
|
||||
|
@ -24,8 +24,16 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func makePV(name, version string) *v1.PersistentVolume {
|
||||
return &v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: name, ResourceVersion: version}}
|
||||
func makePV(name, version, storageClass string) *v1.PersistentVolume {
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
ResourceVersion: version,
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
StorageClassName: storageClass,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssumePV(t *testing.T) {
|
||||
@ -35,33 +43,38 @@ func TestAssumePV(t *testing.T) {
|
||||
shouldSucceed bool
|
||||
}{
|
||||
"success-same-version": {
|
||||
oldPV: makePV("pv1", "5"),
|
||||
newPV: makePV("pv1", "5"),
|
||||
oldPV: makePV("pv1", "5", ""),
|
||||
newPV: makePV("pv1", "5", ""),
|
||||
shouldSucceed: true,
|
||||
},
|
||||
"success-storageclass-same-version": {
|
||||
oldPV: makePV("pv1", "5", "class1"),
|
||||
newPV: makePV("pv1", "5", "class1"),
|
||||
shouldSucceed: true,
|
||||
},
|
||||
"success-new-higher-version": {
|
||||
oldPV: makePV("pv1", "5"),
|
||||
newPV: makePV("pv1", "6"),
|
||||
oldPV: makePV("pv1", "5", ""),
|
||||
newPV: makePV("pv1", "6", ""),
|
||||
shouldSucceed: true,
|
||||
},
|
||||
"fail-old-not-found": {
|
||||
oldPV: makePV("pv2", "5"),
|
||||
newPV: makePV("pv1", "5"),
|
||||
oldPV: makePV("pv2", "5", ""),
|
||||
newPV: makePV("pv1", "5", ""),
|
||||
shouldSucceed: false,
|
||||
},
|
||||
"fail-new-lower-version": {
|
||||
oldPV: makePV("pv1", "5"),
|
||||
newPV: makePV("pv1", "4"),
|
||||
oldPV: makePV("pv1", "5", ""),
|
||||
newPV: makePV("pv1", "4", ""),
|
||||
shouldSucceed: false,
|
||||
},
|
||||
"fail-new-bad-version": {
|
||||
oldPV: makePV("pv1", "5"),
|
||||
newPV: makePV("pv1", "a"),
|
||||
oldPV: makePV("pv1", "5", ""),
|
||||
newPV: makePV("pv1", "a", ""),
|
||||
shouldSucceed: false,
|
||||
},
|
||||
"fail-old-bad-version": {
|
||||
oldPV: makePV("pv1", "a"),
|
||||
newPV: makePV("pv1", "5"),
|
||||
oldPV: makePV("pv1", "a", ""),
|
||||
newPV: makePV("pv1", "5", ""),
|
||||
shouldSucceed: false,
|
||||
},
|
||||
}
|
||||
@ -107,8 +120,8 @@ func TestRestorePV(t *testing.T) {
|
||||
t.Fatalf("Failed to get internal cache")
|
||||
}
|
||||
|
||||
oldPV := makePV("pv1", "5")
|
||||
newPV := makePV("pv1", "5")
|
||||
oldPV := makePV("pv1", "5", "")
|
||||
newPV := makePV("pv1", "5", "")
|
||||
|
||||
// Restore PV that doesn't exist
|
||||
cache.Restore("nothing")
|
||||
@ -159,21 +172,21 @@ func TestBasicPVCache(t *testing.T) {
|
||||
// Add a bunch of PVs
|
||||
pvs := map[string]*v1.PersistentVolume{}
|
||||
for i := 0; i < 10; i++ {
|
||||
pv := makePV(fmt.Sprintf("test-pv%v", i), "1")
|
||||
pv := makePV(fmt.Sprintf("test-pv%v", i), "1", "")
|
||||
pvs[pv.Name] = pv
|
||||
internal_cache.add(pv)
|
||||
}
|
||||
|
||||
// List them
|
||||
verifyListPVs(t, cache, pvs)
|
||||
verifyListPVs(t, cache, pvs, "")
|
||||
|
||||
// Update a PV
|
||||
updatedPV := makePV("test-pv3", "2")
|
||||
updatedPV := makePV("test-pv3", "2", "")
|
||||
pvs[updatedPV.Name] = updatedPV
|
||||
internal_cache.update(nil, updatedPV)
|
||||
|
||||
// List them
|
||||
verifyListPVs(t, cache, pvs)
|
||||
verifyListPVs(t, cache, pvs, "")
|
||||
|
||||
// Delete a PV
|
||||
deletedPV := pvs["test-pv7"]
|
||||
@ -181,11 +194,57 @@ func TestBasicPVCache(t *testing.T) {
|
||||
internal_cache.delete(deletedPV)
|
||||
|
||||
// List them
|
||||
verifyListPVs(t, cache, pvs)
|
||||
verifyListPVs(t, cache, pvs, "")
|
||||
}
|
||||
|
||||
func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume) {
|
||||
pvList := cache.ListPVs()
|
||||
func TestPVCacheWithStorageClasses(t *testing.T) {
|
||||
cache := NewPVAssumeCache(nil)
|
||||
internal_cache, ok := cache.(*pvAssumeCache)
|
||||
if !ok {
|
||||
t.Fatalf("Failed to get internal cache")
|
||||
}
|
||||
|
||||
// Add a bunch of PVs
|
||||
pvs1 := map[string]*v1.PersistentVolume{}
|
||||
for i := 0; i < 10; i++ {
|
||||
pv := makePV(fmt.Sprintf("test-pv%v", i), "1", "class1")
|
||||
pvs1[pv.Name] = pv
|
||||
internal_cache.add(pv)
|
||||
}
|
||||
|
||||
// Add a bunch of PVs
|
||||
pvs2 := map[string]*v1.PersistentVolume{}
|
||||
for i := 0; i < 10; i++ {
|
||||
pv := makePV(fmt.Sprintf("test2-pv%v", i), "1", "class2")
|
||||
pvs2[pv.Name] = pv
|
||||
internal_cache.add(pv)
|
||||
}
|
||||
|
||||
// List them
|
||||
verifyListPVs(t, cache, pvs1, "class1")
|
||||
verifyListPVs(t, cache, pvs2, "class2")
|
||||
|
||||
// Update a PV
|
||||
updatedPV := makePV("test-pv3", "2", "class1")
|
||||
pvs1[updatedPV.Name] = updatedPV
|
||||
internal_cache.update(nil, updatedPV)
|
||||
|
||||
// List them
|
||||
verifyListPVs(t, cache, pvs1, "class1")
|
||||
verifyListPVs(t, cache, pvs2, "class2")
|
||||
|
||||
// Delete a PV
|
||||
deletedPV := pvs1["test-pv7"]
|
||||
delete(pvs1, deletedPV.Name)
|
||||
internal_cache.delete(deletedPV)
|
||||
|
||||
// List them
|
||||
verifyListPVs(t, cache, pvs1, "class1")
|
||||
verifyListPVs(t, cache, pvs2, "class2")
|
||||
}
|
||||
|
||||
func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) {
|
||||
pvList := cache.ListPVs(storageClassName)
|
||||
if len(pvList) != len(expectedPVs) {
|
||||
t.Errorf("ListPVs() returned %v PVs, expected %v", len(pvList), len(expectedPVs))
|
||||
}
|
||||
|
vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/scheduler_binder.go (generated, vendored): 9 lines changed
@@ -350,10 +350,17 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*bindingI
// Sort all the claims by increasing size request to get the smallest fits
sort.Sort(byPVCSize(claimsToBind))

allPVs := b.pvCache.ListPVs()
chosenPVs := map[string]*v1.PersistentVolume{}

for _, bindingInfo := range claimsToBind {
// Get storage class name from each PVC
storageClassName := ""
storageClass := bindingInfo.pvc.Spec.StorageClassName
if storageClass != nil {
storageClassName = *storageClass
}
allPVs := b.pvCache.ListPVs(storageClassName)

// Find a matching PV
bindingInfo.pv, err = findMatchingVolume(bindingInfo.pvc, allPVs, node, chosenPVs, true)
if err != nil {
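Aside, not part of the diff: the ListPVs(storageClassName) call above is backed by the "storageclass" index that NewPVAssumeCache installs in the assume-cache changes earlier in this commit. Below is a standalone sketch of that indexing pattern using client-go's cache.Indexer directly, so the lookup the binder now relies on can be seen in isolation; the PV names and class names are invented for the example.

```go
// Sketch of indexing PVs by storage class with a client-go Indexer.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// pvStorageClassIndexFunc mirrors the index function added in this commit:
// a PV is indexed under its spec.storageClassName.
func pvStorageClassIndexFunc(obj interface{}) ([]string, error) {
	if pv, ok := obj.(*v1.PersistentVolume); ok {
		return []string{pv.Spec.StorageClassName}, nil
	}
	return []string{""}, fmt.Errorf("object is not a v1.PersistentVolume: %v", obj)
}

func main() {
	// Key PVs by name and additionally index them by storage class.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"storageclass": pvStorageClassIndexFunc,
	})

	for i, class := range []string{"class1", "class1", "class2"} {
		pv := &v1.PersistentVolume{
			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("pv-%d", i)},
			Spec:       v1.PersistentVolumeSpec{StorageClassName: class},
		}
		if err := indexer.Add(pv); err != nil {
			panic(err)
		}
	}

	// ByIndex returns only the PVs whose index value matches, which is what
	// lets ListPVs(storageClassName) avoid scanning every PV in the cache.
	objs, err := indexer.ByIndex("storageclass", "class1")
	if err != nil {
		panic(err)
	}
	fmt.Println("PVs in class1:", len(objs)) // 2
}
```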
@@ -86,7 +86,7 @@ func TestDeleteBindings(t *testing.T) {
// Get nil bindings
bindings := cache.GetBindings(pod, "node1")
if bindings != nil {
t.Errorf("Test failed: expected inital nil bindings, got %+v", bindings)
t.Errorf("Test failed: expected initial nil bindings, got %+v", bindings)
}

// Delete nothing
@@ -331,7 +331,7 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV
},
}
if node != "" {
pv.Annotations = getAnnotationWithNodeAffinity("key1", node)
pv.Spec.NodeAffinity = getVolumeNodeAffinity("key1", node)
}

if boundToPVC != nil {