Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00.

Commit 22ff5c0911 (parent 3bc6771df8), committed by mergify[bot]:

    Migrate from snapClient.VolumesnapshotV1alpha1Client to
    snapClient.SnapshotV1alpha1Client and also update the kube dependency.

    Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
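For context, the rename named in the commit message is a one-line change at each call site. The sketch below is illustrative only: the clientset import path, the Interface type and the listSnapshots helper are assumptions, not taken from this diff; only the old and new client names come from the commit message.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// Assumed import path for the external-snapshotter clientset of that era.
	snapclient "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
)

// listSnapshots is a hypothetical helper showing the client rename: callers
// that previously went through VolumesnapshotV1alpha1() now go through
// SnapshotV1alpha1(); the rest of the call chain stays the same.
func listSnapshots(cs snapclient.Interface, namespace string) error {
	// Before: cs.VolumesnapshotV1alpha1().VolumeSnapshots(namespace).List(...)
	_, err := cs.SnapshotV1alpha1().VolumeSnapshots(namespace).List(metav1.ListOptions{})
	return err
}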
34  vendor/k8s.io/kubernetes/pkg/controller/volume/events/event.go (generated, vendored)
@@ -1,34 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package events

const (
	// volume relevant event reasons
	FailedBinding             = "FailedBinding"
	VolumeMismatch            = "VolumeMismatch"
	VolumeFailedRecycle       = "VolumeFailedRecycle"
	VolumeRecycled            = "VolumeRecycled"
	RecyclerPod               = "RecyclerPod"
	VolumeDelete              = "VolumeDelete"
	VolumeFailedDelete        = "VolumeFailedDelete"
	ExternalProvisioning      = "ExternalProvisioning"
	ProvisioningFailed        = "ProvisioningFailed"
	ProvisioningCleanupFailed = "ProvisioningCleanupFailed"
	ProvisioningSucceeded     = "ProvisioningSucceeded"
	WaitForFirstConsumer      = "WaitForFirstConsumer"
	ExternalExpanding         = "ExternalExpanding"
)
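The constants in the removed file are plain reason strings; they only take effect when handed to an event recorder. A minimal sketch of that pattern, assuming a recorder and a claim are available from the surrounding controller (the helper name is made up):

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/controller/volume/events"
)

// reportWaitForFirstConsumer is a hypothetical helper showing how the reason
// constants above are typically consumed: they become the "reason" field of
// an event emitted against a PVC.
func reportWaitForFirstConsumer(recorder record.EventRecorder, claim *v1.PersistentVolumeClaim) {
	recorder.Event(claim, v1.EventTypeNormal, events.WaitForFirstConsumer,
		"waiting for first consumer to be created before binding")
}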
211  vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics/metrics.go (generated, vendored)
@ -1,211 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
// Subsystem names.
|
||||
pvControllerSubsystem = "pv_collector"
|
||||
|
||||
// Metric names.
|
||||
boundPVKey = "bound_pv_count"
|
||||
unboundPVKey = "unbound_pv_count"
|
||||
boundPVCKey = "bound_pvc_count"
|
||||
unboundPVCKey = "unbound_pvc_count"
|
||||
|
||||
// Label names.
|
||||
namespaceLabel = "namespace"
|
||||
storageClassLabel = "storage_class"
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
|
||||
// PVLister used to list persistent volumes.
|
||||
type PVLister interface {
|
||||
List() []interface{}
|
||||
}
|
||||
|
||||
// PVCLister used to list persistent volume claims.
|
||||
type PVCLister interface {
|
||||
List() []interface{}
|
||||
}
|
||||
|
||||
// Register all metrics for pv controller.
|
||||
func Register(pvLister PVLister, pvcLister PVCLister) {
|
||||
registerMetrics.Do(func() {
|
||||
prometheus.MustRegister(newPVAndPVCCountCollector(pvLister, pvcLister))
|
||||
prometheus.MustRegister(volumeOperationMetric)
|
||||
prometheus.MustRegister(volumeOperationErrorsMetric)
|
||||
})
|
||||
}
|
||||
|
||||
func newPVAndPVCCountCollector(pvLister PVLister, pvcLister PVCLister) *pvAndPVCCountCollector {
|
||||
return &pvAndPVCCountCollector{pvLister, pvcLister}
|
||||
}
|
||||
|
||||
// Custom collector for current pod and container counts.
|
||||
type pvAndPVCCountCollector struct {
|
||||
// Cache for accessing information about PersistentVolumes.
|
||||
pvLister PVLister
|
||||
// Cache for accessing information about PersistentVolumeClaims.
|
||||
pvcLister PVCLister
|
||||
}
|
||||
|
||||
var (
|
||||
boundPVCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, boundPVKey),
|
||||
"Gauge measuring number of persistent volume currently bound",
|
||||
[]string{storageClassLabel}, nil)
|
||||
unboundPVCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVKey),
|
||||
"Gauge measuring number of persistent volume currently unbound",
|
||||
[]string{storageClassLabel}, nil)
|
||||
|
||||
boundPVCCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, boundPVCKey),
|
||||
"Gauge measuring number of persistent volume claim currently bound",
|
||||
[]string{namespaceLabel}, nil)
|
||||
unboundPVCCountDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName("", pvControllerSubsystem, unboundPVCKey),
|
||||
"Gauge measuring number of persistent volume claim currently unbound",
|
||||
[]string{namespaceLabel}, nil)
|
||||
|
||||
volumeOperationMetric = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "volume_operation_total_seconds",
|
||||
Help: "Total volume operation time",
|
||||
},
|
||||
[]string{"plugin_name", "operation_name"})
|
||||
volumeOperationErrorsMetric = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "volume_operation_total_errors",
|
||||
Help: "Total volume operation erros",
|
||||
},
|
||||
[]string{"plugin_name", "operation_name"})
|
||||
)
|
||||
|
||||
func (collector *pvAndPVCCountCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- boundPVCountDesc
|
||||
ch <- unboundPVCountDesc
|
||||
ch <- boundPVCCountDesc
|
||||
ch <- unboundPVCCountDesc
|
||||
}
|
||||
|
||||
func (collector *pvAndPVCCountCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
collector.pvCollect(ch)
|
||||
collector.pvcCollect(ch)
|
||||
}
|
||||
|
||||
func (collector *pvAndPVCCountCollector) pvCollect(ch chan<- prometheus.Metric) {
|
||||
boundNumberByStorageClass := make(map[string]int)
|
||||
unboundNumberByStorageClass := make(map[string]int)
|
||||
for _, pvObj := range collector.pvLister.List() {
|
||||
pv, ok := pvObj.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if pv.Status.Phase == v1.VolumeBound {
|
||||
boundNumberByStorageClass[pv.Spec.StorageClassName]++
|
||||
} else {
|
||||
unboundNumberByStorageClass[pv.Spec.StorageClassName]++
|
||||
}
|
||||
}
|
||||
for storageClassName, number := range boundNumberByStorageClass {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
boundPVCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
storageClassName)
|
||||
if err != nil {
|
||||
klog.Warningf("Create bound pv number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
for storageClassName, number := range unboundNumberByStorageClass {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
unboundPVCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
storageClassName)
|
||||
if err != nil {
|
||||
klog.Warningf("Create unbound pv number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
}
|
||||
|
||||
func (collector *pvAndPVCCountCollector) pvcCollect(ch chan<- prometheus.Metric) {
|
||||
boundNumberByNamespace := make(map[string]int)
|
||||
unboundNumberByNamespace := make(map[string]int)
|
||||
for _, pvcObj := range collector.pvcLister.List() {
|
||||
pvc, ok := pvcObj.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if pvc.Status.Phase == v1.ClaimBound {
|
||||
boundNumberByNamespace[pvc.Namespace]++
|
||||
} else {
|
||||
unboundNumberByNamespace[pvc.Namespace]++
|
||||
}
|
||||
}
|
||||
for namespace, number := range boundNumberByNamespace {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
boundPVCCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
namespace)
|
||||
if err != nil {
|
||||
klog.Warningf("Create bound pvc number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
for namespace, number := range unboundNumberByNamespace {
|
||||
metric, err := prometheus.NewConstMetric(
|
||||
unboundPVCCountDesc,
|
||||
prometheus.GaugeValue,
|
||||
float64(number),
|
||||
namespace)
|
||||
if err != nil {
|
||||
klog.Warningf("Create unbound pvc number metric failed: %v", err)
|
||||
continue
|
||||
}
|
||||
ch <- metric
|
||||
}
|
||||
}
|
||||
|
||||
// RecordVolumeOperationMetric records the latency and errors of volume operations.
|
||||
func RecordVolumeOperationMetric(pluginName, opName string, timeTaken float64, err error) {
|
||||
if pluginName == "" {
|
||||
pluginName = "N/A"
|
||||
}
|
||||
if err != nil {
|
||||
volumeOperationErrorsMetric.WithLabelValues(pluginName, opName).Inc()
|
||||
return
|
||||
}
|
||||
volumeOperationMetric.WithLabelValues(pluginName, opName).Observe(timeTaken)
|
||||
}
|
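RecordVolumeOperationMetric above takes a plugin name, an operation name, the elapsed time in seconds and an error. A small sketch of how a caller typically times an operation and reports it; the timeProvision wrapper and the provisionVolume callback are assumed placeholders, not functions from this diff:

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
)

// timeProvision is a hypothetical wrapper: it measures how long a volume
// operation took and reports it, which feeds either the latency histogram or
// the error counter inside RecordVolumeOperationMetric.
func timeProvision(pluginName string, provisionVolume func() error) error {
	start := time.Now()
	err := provisionVolume() // assumed operation; any volume operation fits here
	metrics.RecordVolumeOperationMetric(pluginName, "provision", time.Since(start).Seconds(), err)
	return err
}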
1695  vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller.go (generated, vendored)
File diff suppressed because it is too large.
523  vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/pv_controller_base.go (generated, vendored)
@ -1,523 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
storageinformers "k8s.io/client-go/informers/storage/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics"
|
||||
"k8s.io/kubernetes/pkg/util/goroutinemap"
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// This file contains the controller base functionality, i.e. framework to
|
||||
// process PV/PVC added/updated/deleted events. The real binding, provisioning,
|
||||
// recycling and deleting is done in pv_controller.go
|
||||
|
||||
// ControllerParameters contains arguments for creation of a new
|
||||
// PersistentVolume controller.
|
||||
type ControllerParameters struct {
|
||||
KubeClient clientset.Interface
|
||||
SyncPeriod time.Duration
|
||||
VolumePlugins []vol.VolumePlugin
|
||||
Cloud cloudprovider.Interface
|
||||
ClusterName string
|
||||
VolumeInformer coreinformers.PersistentVolumeInformer
|
||||
ClaimInformer coreinformers.PersistentVolumeClaimInformer
|
||||
ClassInformer storageinformers.StorageClassInformer
|
||||
PodInformer coreinformers.PodInformer
|
||||
NodeInformer coreinformers.NodeInformer
|
||||
EventRecorder record.EventRecorder
|
||||
EnableDynamicProvisioning bool
|
||||
}
|
||||
|
||||
// NewController creates a new PersistentVolume controller
|
||||
func NewController(p ControllerParameters) (*PersistentVolumeController, error) {
|
||||
eventRecorder := p.EventRecorder
|
||||
if eventRecorder == nil {
|
||||
broadcaster := record.NewBroadcaster()
|
||||
broadcaster.StartLogging(klog.Infof)
|
||||
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: p.KubeClient.CoreV1().Events("")})
|
||||
eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
|
||||
}
|
||||
|
||||
controller := &PersistentVolumeController{
|
||||
volumes: newPersistentVolumeOrderedIndex(),
|
||||
claims: cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc),
|
||||
kubeClient: p.KubeClient,
|
||||
eventRecorder: eventRecorder,
|
||||
runningOperations: goroutinemap.NewGoRoutineMap(true /* exponentialBackOffOnError */),
|
||||
cloud: p.Cloud,
|
||||
enableDynamicProvisioning: p.EnableDynamicProvisioning,
|
||||
clusterName: p.ClusterName,
|
||||
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
|
||||
createProvisionedPVInterval: createProvisionedPVInterval,
|
||||
claimQueue: workqueue.NewNamed("claims"),
|
||||
volumeQueue: workqueue.NewNamed("volumes"),
|
||||
resyncPeriod: p.SyncPeriod,
|
||||
}
|
||||
|
||||
// Prober is nil because PV is not aware of Flexvolume.
|
||||
if err := controller.volumePluginMgr.InitPlugins(p.VolumePlugins, nil /* prober */, controller); err != nil {
|
||||
return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolume Controller: %v", err)
|
||||
}
|
||||
|
||||
p.VolumeInformer.Informer().AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
|
||||
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) },
|
||||
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
|
||||
},
|
||||
)
|
||||
controller.volumeLister = p.VolumeInformer.Lister()
|
||||
controller.volumeListerSynced = p.VolumeInformer.Informer().HasSynced
|
||||
|
||||
p.ClaimInformer.Informer().AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
|
||||
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) },
|
||||
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
|
||||
},
|
||||
)
|
||||
controller.claimLister = p.ClaimInformer.Lister()
|
||||
controller.claimListerSynced = p.ClaimInformer.Informer().HasSynced
|
||||
|
||||
controller.classLister = p.ClassInformer.Lister()
|
||||
controller.classListerSynced = p.ClassInformer.Informer().HasSynced
|
||||
controller.podLister = p.PodInformer.Lister()
|
||||
controller.podListerSynced = p.PodInformer.Informer().HasSynced
|
||||
controller.NodeLister = p.NodeInformer.Lister()
|
||||
controller.NodeListerSynced = p.NodeInformer.Informer().HasSynced
|
||||
return controller, nil
|
||||
}
|
||||
|
||||
// initializeCaches fills all controller caches with initial data from etcd in
|
||||
// order to have the caches already filled when first addClaim/addVolume to
|
||||
// perform initial synchronization of the controller.
|
||||
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
|
||||
volumeList, err := volumeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
return
|
||||
}
|
||||
for _, volume := range volumeList {
|
||||
volumeClone := volume.DeepCopy()
|
||||
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
|
||||
klog.Errorf("error updating volume cache: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
claimList, err := claimLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
return
|
||||
}
|
||||
for _, claim := range claimList {
|
||||
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
|
||||
klog.Errorf("error updating claim cache: %v", err)
|
||||
}
|
||||
}
|
||||
klog.V(4).Infof("controller initialized")
|
||||
}
|
||||
|
||||
// enqueueWork adds volume or claim to given work queue.
|
||||
func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, obj interface{}) {
|
||||
// Beware of "xxx deleted" events
|
||||
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
|
||||
obj = unknown.Obj
|
||||
}
|
||||
objName, err := controller.KeyFunc(obj)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get key from object: %v", err)
|
||||
return
|
||||
}
|
||||
klog.V(5).Infof("enqueued %q for sync", objName)
|
||||
queue.Add(objName)
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) storeVolumeUpdate(volume interface{}) (bool, error) {
|
||||
return storeObjectUpdate(ctrl.volumes.store, volume, "volume")
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (bool, error) {
|
||||
return storeObjectUpdate(ctrl.claims, claim, "claim")
|
||||
}
|
||||
|
||||
// updateVolume runs in worker thread and handles "volume added",
|
||||
// "volume updated" and "periodic sync" events.
|
||||
func (ctrl *PersistentVolumeController) updateVolume(volume *v1.PersistentVolume) {
|
||||
// Store the new volume version in the cache and do not process it if this
|
||||
// is an old version.
|
||||
new, err := ctrl.storeVolumeUpdate(volume)
|
||||
if err != nil {
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
}
|
||||
|
||||
err = ctrl.syncVolume(volume)
|
||||
if err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
|
||||
} else {
|
||||
klog.Errorf("could not sync volume %q: %+v", volume.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deleteVolume runs in worker thread and handles "volume deleted" event.
|
||||
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) {
|
||||
_ = ctrl.volumes.store.Delete(volume)
|
||||
klog.V(4).Infof("volume %q deleted", volume.Name)
|
||||
|
||||
if volume.Spec.ClaimRef == nil {
|
||||
return
|
||||
}
|
||||
// sync the claim when its volume is deleted. Explicitly syncing the
|
||||
// claim here in response to volume deletion prevents the claim from
|
||||
// waiting until the next sync period for its Lost status.
|
||||
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
|
||||
klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
|
||||
ctrl.claimQueue.Add(claimKey)
|
||||
}
|
||||
|
||||
// updateClaim runs in worker thread and handles "claim added",
|
||||
// "claim updated" and "periodic sync" events.
|
||||
func (ctrl *PersistentVolumeController) updateClaim(claim *v1.PersistentVolumeClaim) {
|
||||
// Store the new claim version in the cache and do not process it if this is
|
||||
// an old version.
|
||||
new, err := ctrl.storeClaimUpdate(claim)
|
||||
if err != nil {
|
||||
klog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
}
|
||||
err = ctrl.syncClaim(claim)
|
||||
if err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
|
||||
} else {
|
||||
klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deleteClaim runs in worker thread and handles "claim deleted" event.
|
||||
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) {
|
||||
_ = ctrl.claims.Delete(claim)
|
||||
klog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
|
||||
|
||||
volumeName := claim.Spec.VolumeName
|
||||
if volumeName == "" {
|
||||
klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimToClaimKey(claim))
|
||||
return
|
||||
}
|
||||
// sync the volume when its claim is deleted. Explicitly sync'ing the
|
||||
// volume here in response to claim deletion prevents the volume from
|
||||
// waiting until the next sync period for its Release.
|
||||
klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimToClaimKey(claim), volumeName)
|
||||
ctrl.volumeQueue.Add(volumeName)
|
||||
}
|
||||
|
||||
// Run starts all of this controller's control loops
|
||||
func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer ctrl.claimQueue.ShutDown()
|
||||
defer ctrl.volumeQueue.ShutDown()
|
||||
|
||||
klog.Infof("Starting persistent volume controller")
|
||||
defer klog.Infof("Shutting down persistent volume controller")
|
||||
|
||||
if !controller.WaitForCacheSync("persistent volume", stopCh, ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
ctrl.initializeCaches(ctrl.volumeLister, ctrl.claimLister)
|
||||
|
||||
go wait.Until(ctrl.resync, ctrl.resyncPeriod, stopCh)
|
||||
go wait.Until(ctrl.volumeWorker, time.Second, stopCh)
|
||||
go wait.Until(ctrl.claimWorker, time.Second, stopCh)
|
||||
|
||||
metrics.Register(ctrl.volumes.store, ctrl.claims)
|
||||
|
||||
<-stopCh
|
||||
}
|
||||
|
||||
// volumeWorker processes items from volumeQueue. It must run only once,
|
||||
// syncVolume is not assured to be reentrant.
|
||||
func (ctrl *PersistentVolumeController) volumeWorker() {
|
||||
workFunc := func() bool {
|
||||
keyObj, quit := ctrl.volumeQueue.Get()
|
||||
if quit {
|
||||
return true
|
||||
}
|
||||
defer ctrl.volumeQueue.Done(keyObj)
|
||||
key := keyObj.(string)
|
||||
klog.V(5).Infof("volumeWorker[%s]", key)
|
||||
|
||||
_, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
volume, err := ctrl.volumeLister.Get(name)
|
||||
if err == nil {
|
||||
// The volume still exists in informer cache, the event must have
|
||||
// been add/update/sync
|
||||
ctrl.updateVolume(volume)
|
||||
return false
|
||||
}
|
||||
if !errors.IsNotFound(err) {
|
||||
klog.V(2).Infof("error getting volume %q from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
|
||||
// The volume is not in informer cache, the event must have been
|
||||
// "delete"
|
||||
volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("error getting volume %q from cache: %v", key, err)
|
||||
return false
|
||||
}
|
||||
if !found {
|
||||
// The controller has already processed the delete event and
|
||||
// deleted the volume from its cache
|
||||
klog.V(2).Infof("deletion of volume %q was already processed", key)
|
||||
return false
|
||||
}
|
||||
volume, ok := volumeObj.(*v1.PersistentVolume)
|
||||
if !ok {
|
||||
klog.Errorf("expected volume, got %+v", volumeObj)
|
||||
return false
|
||||
}
|
||||
ctrl.deleteVolume(volume)
|
||||
return false
|
||||
}
|
||||
for {
|
||||
if quit := workFunc(); quit {
|
||||
klog.Infof("volume worker queue shutting down")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// claimWorker processes items from claimQueue. It must run only once,
|
||||
// syncClaim is not reentrant.
|
||||
func (ctrl *PersistentVolumeController) claimWorker() {
|
||||
workFunc := func() bool {
|
||||
keyObj, quit := ctrl.claimQueue.Get()
|
||||
if quit {
|
||||
return true
|
||||
}
|
||||
defer ctrl.claimQueue.Done(keyObj)
|
||||
key := keyObj.(string)
|
||||
klog.V(5).Infof("claimWorker[%s]", key)
|
||||
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
|
||||
if err == nil {
|
||||
// The claim still exists in informer cache, the event must have
|
||||
// been add/update/sync
|
||||
ctrl.updateClaim(claim)
|
||||
return false
|
||||
}
|
||||
if !errors.IsNotFound(err) {
|
||||
klog.V(2).Infof("error getting claim %q from informer: %v", key, err)
|
||||
return false
|
||||
}
|
||||
|
||||
// The claim is not in informer cache, the event must have been "delete"
|
||||
claimObj, found, err := ctrl.claims.GetByKey(key)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("error getting claim %q from cache: %v", key, err)
|
||||
return false
|
||||
}
|
||||
if !found {
|
||||
// The controller has already processed the delete event and
|
||||
// deleted the claim from its cache
|
||||
klog.V(2).Infof("deletion of claim %q was already processed", key)
|
||||
return false
|
||||
}
|
||||
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
klog.Errorf("expected claim, got %+v", claimObj)
|
||||
return false
|
||||
}
|
||||
ctrl.deleteClaim(claim)
|
||||
return false
|
||||
}
|
||||
for {
|
||||
if quit := workFunc(); quit {
|
||||
klog.Infof("claim worker queue shutting down")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// resync supplements short resync period of shared informers - we don't want
|
||||
// all consumers of PV/PVC shared informer to have a short resync period,
|
||||
// therefore we do our own.
|
||||
func (ctrl *PersistentVolumeController) resync() {
|
||||
klog.V(4).Infof("resyncing PV controller")
|
||||
|
||||
pvcs, err := ctrl.claimLister.List(labels.NewSelector())
|
||||
if err != nil {
|
||||
klog.Warningf("cannot list claims: %s", err)
|
||||
return
|
||||
}
|
||||
for _, pvc := range pvcs {
|
||||
ctrl.enqueueWork(ctrl.claimQueue, pvc)
|
||||
}
|
||||
|
||||
pvs, err := ctrl.volumeLister.List(labels.NewSelector())
|
||||
if err != nil {
|
||||
klog.Warningf("cannot list persistent volumes: %s", err)
|
||||
return
|
||||
}
|
||||
for _, pv := range pvs {
|
||||
ctrl.enqueueWork(ctrl.volumeQueue, pv)
|
||||
}
|
||||
}
|
||||
|
||||
// setClaimProvisioner saves
|
||||
// claim.Annotations[annStorageProvisioner] = class.Provisioner
|
||||
func (ctrl *PersistentVolumeController) setClaimProvisioner(claim *v1.PersistentVolumeClaim, provisionerName string) (*v1.PersistentVolumeClaim, error) {
|
||||
if val, ok := claim.Annotations[annStorageProvisioner]; ok && val == provisionerName {
|
||||
// annotation is already set, nothing to do
|
||||
return claim, nil
|
||||
}
|
||||
|
||||
// The volume from method args can be pointing to watcher cache. We must not
|
||||
// modify these, therefore create a copy.
|
||||
claimClone := claim.DeepCopy()
|
||||
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annStorageProvisioner, provisionerName)
|
||||
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claimClone)
|
||||
if err != nil {
|
||||
return newClaim, err
|
||||
}
|
||||
_, err = ctrl.storeClaimUpdate(newClaim)
|
||||
if err != nil {
|
||||
return newClaim, err
|
||||
}
|
||||
return newClaim, nil
|
||||
}
|
||||
|
||||
// Stateless functions
|
||||
|
||||
func getClaimStatusForLogging(claim *v1.PersistentVolumeClaim) string {
|
||||
bound := metav1.HasAnnotation(claim.ObjectMeta, annBindCompleted)
|
||||
boundByController := metav1.HasAnnotation(claim.ObjectMeta, annBoundByController)
|
||||
|
||||
return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
|
||||
}
|
||||
|
||||
func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
|
||||
boundByController := metav1.HasAnnotation(volume.ObjectMeta, annBoundByController)
|
||||
claimName := ""
|
||||
if volume.Spec.ClaimRef != nil {
|
||||
claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
|
||||
}
|
||||
return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
|
||||
}
|
||||
|
||||
// storeObjectUpdate updates given cache with a new object version from Informer
|
||||
// callback (i.e. with events from etcd) or with an object modified by the
|
||||
// controller itself. Returns "true", if the cache was updated, false if the
|
||||
// object is an old version and should be ignored.
|
||||
func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) {
|
||||
objName, err := controller.KeyFunc(obj)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Couldn't get key for object %+v: %v", obj, err)
|
||||
}
|
||||
oldObj, found, err := store.Get(obj)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error finding %s %q in controller cache: %v", className, objName, err)
|
||||
}
|
||||
|
||||
objAccessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !found {
|
||||
// This is a new object
|
||||
klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
if err = store.Add(obj); err != nil {
|
||||
return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
oldObjAccessor, err := meta.Accessor(oldObj)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error parsing ResourceVersion %q of %s %q: %s", objAccessor.GetResourceVersion(), className, objName, err)
|
||||
}
|
||||
oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error parsing old ResourceVersion %q of %s %q: %s", oldObjAccessor.GetResourceVersion(), className, objName, err)
|
||||
}
|
||||
|
||||
// Throw away only older version, let the same version pass - we do want to
|
||||
// get periodic sync events.
|
||||
if oldObjResourceVersion > objResourceVersion {
|
||||
klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
return false, nil
|
||||
}
|
||||
|
||||
klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
if err = store.Update(obj); err != nil {
|
||||
return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
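The heart of storeObjectUpdate above is the ResourceVersion comparison: versions are parsed as integers and an update is dropped only when it is strictly older than what the cache holds, so periodic resyncs carrying an equal version still reach the controller. A standalone sketch of just that rule (the helper name is an illustration, not part of the vendored code):

package example

import "strconv"

// isStaleUpdate reports whether an incoming object version should be ignored,
// following the rule in storeObjectUpdate: drop strictly older versions, keep
// equal ones so periodic sync events still pass through.
func isStaleUpdate(cachedResourceVersion, incomingResourceVersion string) (bool, error) {
	cached, err := strconv.ParseInt(cachedResourceVersion, 10, 64)
	if err != nil {
		return false, err
	}
	incoming, err := strconv.ParseInt(incomingResourceVersion, 10, 64)
	if err != nil {
		return false, err
	}
	return cached > incoming, nil
}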
103  vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util.go (generated, vendored)
@@ -1,103 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"fmt"

	"k8s.io/api/core/v1"
	storage "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	"k8s.io/client-go/tools/reference"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

// IsDelayBindingMode checks if claim is in delay binding mode.
func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) {
	className := v1helper.GetPersistentVolumeClaimClass(claim)
	if className == "" {
		return false, nil
	}

	class, err := classLister.Get(className)
	if err != nil {
		return false, nil
	}

	if class.VolumeBindingMode == nil {
		return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className)
	}

	return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil
}

// GetBindVolumeToClaim returns a new volume which is bound to given claim. In
// addition, it returns a bool which indicates whether we made modification on
// original volume.
func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) {
	dirty := false

	// Check if the volume was already bound (either by user or by controller)
	shouldSetBoundByController := false
	if !IsVolumeBoundToClaim(volume, claim) {
		shouldSetBoundByController = true
	}

	// The volume from method args can be pointing to watcher cache. We must not
	// modify these, therefore create a copy.
	volumeClone := volume.DeepCopy()

	// Bind the volume to the claim if it is not bound yet
	if volume.Spec.ClaimRef == nil ||
		volume.Spec.ClaimRef.Name != claim.Name ||
		volume.Spec.ClaimRef.Namespace != claim.Namespace ||
		volume.Spec.ClaimRef.UID != claim.UID {

		claimRef, err := reference.GetReference(scheme.Scheme, claim)
		if err != nil {
			return nil, false, fmt.Errorf("Unexpected error getting claim reference: %v", err)
		}
		volumeClone.Spec.ClaimRef = claimRef
		dirty = true
	}

	// Set annBoundByController if it is not set yet
	if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, annBoundByController) {
		metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, annBoundByController, "yes")
		dirty = true
	}

	return volumeClone, dirty, nil
}

// IsVolumeBoundToClaim returns true, if given volume is pre-bound or bound
// to specific claim. Both claim.Name and claim.Namespace must be equal.
// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool {
	if volume.Spec.ClaimRef == nil {
		return false
	}
	if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
		return false
	}
	if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
		return false
	}
	return true
}
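The helpers in the removed file are meant to be used together: check whether a PV already points at the claim, take the returned copy, and persist it only when the dirty flag is set. A hedged sketch of that call pattern; bindIfNeeded and its arguments are placeholders assumed for illustration, and the context-free Update call matches the client-go vintage used elsewhere in this diff:

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
)

// bindIfNeeded is a hypothetical caller of the helpers above: it binds the PV
// to the claim via a modified copy and writes it back only when the helper
// reports that it actually changed something.
func bindIfNeeded(kubeClient clientset.Interface, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
	boundPV, dirty, err := persistentvolume.GetBindVolumeToClaim(pv, pvc)
	if err != nil {
		return err
	}
	if dirty {
		// Only write back when the copy differs from the original.
		_, err = kubeClient.CoreV1().PersistentVolumes().Update(boundPV)
	}
	return err
}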
@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -18,97 +18,132 @@ package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
storagelisters "k8s.io/client-go/listers/storage/v1"
|
||||
"k8s.io/client-go/tools/reference"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
volumeutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
)
|
||||
|
||||
// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes
|
||||
// indexed by AccessModes and ordered by storage capacity.
|
||||
type persistentVolumeOrderedIndex struct {
|
||||
store cache.Indexer
|
||||
}
|
||||
const (
|
||||
// AnnBindCompleted Annotation applies to PVCs. It indicates that the lifecycle
|
||||
// of the PVC has passed through the initial setup. This information changes how
|
||||
// we interpret some observations of the state of the objects. Value of this
|
||||
// Annotation does not matter.
|
||||
AnnBindCompleted = "pv.kubernetes.io/bind-completed"
|
||||
|
||||
func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex {
|
||||
return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})}
|
||||
}
|
||||
// AnnBoundByController annotation applies to PVs and PVCs. It indicates that
|
||||
// the binding (PV->PVC or PVC->PV) was installed by the controller. The
|
||||
// absence of this annotation means the binding was done by the user (i.e.
|
||||
// pre-bound). Value of this annotation does not matter.
|
||||
// External PV binders must bind PV the same way as PV controller, otherwise PV
|
||||
// controller may not handle it correctly.
|
||||
AnnBoundByController = "pv.kubernetes.io/bound-by-controller"
|
||||
|
||||
// accessModesIndexFunc is an indexing function that returns a persistent
|
||||
// volume's AccessModes as a string
|
||||
func accessModesIndexFunc(obj interface{}) ([]string, error) {
|
||||
if pv, ok := obj.(*v1.PersistentVolume); ok {
|
||||
modes := v1helper.GetAccessModesAsString(pv.Spec.AccessModes)
|
||||
return []string{modes}, nil
|
||||
}
|
||||
return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj)
|
||||
}
|
||||
// AnnSelectedNode annotation is added to a PVC that has been triggered by scheduler to
|
||||
// be dynamically provisioned. Its value is the name of the selected node.
|
||||
AnnSelectedNode = "volume.kubernetes.io/selected-node"
|
||||
|
||||
// listByAccessModes returns all volumes with the given set of
|
||||
// AccessModeTypes. The list is unsorted!
|
||||
func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []v1.PersistentVolumeAccessMode) ([]*v1.PersistentVolume, error) {
|
||||
pv := &v1.PersistentVolume{
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
AccessModes: modes,
|
||||
},
|
||||
// NotSupportedProvisioner is a special provisioner name which can be set
|
||||
// in storage class to indicate dynamic provisioning is not supported by
|
||||
// the storage.
|
||||
NotSupportedProvisioner = "kubernetes.io/no-provisioner"
|
||||
|
||||
// AnnDynamicallyProvisioned annotation is added to a PV that has been dynamically provisioned by
|
||||
// Kubernetes. Its value is name of volume plugin that created the volume.
|
||||
// It serves both user (to show where a PV comes from) and Kubernetes (to
|
||||
// recognize dynamically provisioned PVs in its decisions).
|
||||
AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
|
||||
|
||||
// AnnStorageProvisioner annotation is added to a PVC that is supposed to be dynamically
|
||||
// provisioned. Its value is name of volume plugin that is supposed to provision
|
||||
// a volume for this PVC.
|
||||
AnnStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"
|
||||
)
|
||||
|
||||
// IsDelayBindingMode checks if claim is in delay binding mode.
|
||||
func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) {
|
||||
className := v1helper.GetPersistentVolumeClaimClass(claim)
|
||||
if className == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
objs, err := pvIndex.store.Index("accessmodes", pv)
|
||||
class, err := classLister.Get(className)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return false, nil
|
||||
}
|
||||
|
||||
volumes := make([]*v1.PersistentVolume, len(objs))
|
||||
for i, obj := range objs {
|
||||
volumes[i] = obj.(*v1.PersistentVolume)
|
||||
if class.VolumeBindingMode == nil {
|
||||
return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className)
|
||||
}
|
||||
|
||||
return volumes, nil
|
||||
return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil
|
||||
}
|
||||
|
||||
// find returns the nearest PV from the ordered list or nil if a match is not found
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVolumeClaim, delayBinding bool) (*v1.PersistentVolume, error) {
|
||||
// PVs are indexed by their access modes to allow easier searching. Each
|
||||
// index is the string representation of a set of access modes. There is a
|
||||
// finite number of possible sets and PVs will only be indexed in one of
|
||||
// them (whichever index matches the PV's modes).
|
||||
//
|
||||
// A request for resources will always specify its desired access modes.
|
||||
// Any matching PV must have at least that number of access modes, but it
|
||||
// can have more. For example, a user asks for ReadWriteOnce but a GCEPD
|
||||
// is available, which is ReadWriteOnce+ReadOnlyMany.
|
||||
//
|
||||
// Searches are performed against a set of access modes, so we can attempt
|
||||
// not only the exact matching modes but also potential matches (the GCEPD
|
||||
// example above).
|
||||
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
|
||||
// GetBindVolumeToClaim returns a new volume which is bound to given claim. In
|
||||
// addition, it returns a bool which indicates whether we made modification on
|
||||
// original volume.
|
||||
func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) {
|
||||
dirty := false
|
||||
|
||||
for _, modes := range allPossibleModes {
|
||||
volumes, err := pvIndex.listByAccessModes(modes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bestVol, err := findMatchingVolume(claim, volumes, nil /* node for topology binding*/, nil /* exclusion map */, delayBinding)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if bestVol != nil {
|
||||
return bestVol, nil
|
||||
}
|
||||
// Check if the volume was already bound (either by user or by controller)
|
||||
shouldSetBoundByController := false
|
||||
if !IsVolumeBoundToClaim(volume, claim) {
|
||||
shouldSetBoundByController = true
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
// The volume from method args can be pointing to watcher cache. We must not
|
||||
// modify these, therefore create a copy.
|
||||
volumeClone := volume.DeepCopy()
|
||||
|
||||
// Bind the volume to the claim if it is not bound yet
|
||||
if volume.Spec.ClaimRef == nil ||
|
||||
volume.Spec.ClaimRef.Name != claim.Name ||
|
||||
volume.Spec.ClaimRef.Namespace != claim.Namespace ||
|
||||
volume.Spec.ClaimRef.UID != claim.UID {
|
||||
|
||||
claimRef, err := reference.GetReference(scheme.Scheme, claim)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("Unexpected error getting claim reference: %v", err)
|
||||
}
|
||||
volumeClone.Spec.ClaimRef = claimRef
|
||||
dirty = true
|
||||
}
|
||||
|
||||
// Set AnnBoundByController if it is not set yet
|
||||
if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, AnnBoundByController) {
|
||||
metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, AnnBoundByController, "yes")
|
||||
dirty = true
|
||||
}
|
||||
|
||||
return volumeClone, dirty, nil
|
||||
}
|
||||
|
||||
// findMatchingVolume goes through the list of volumes to find the best matching volume
|
||||
// IsVolumeBoundToClaim returns true, if given volume is pre-bound or bound
|
||||
// to specific claim. Both claim.Name and claim.Namespace must be equal.
|
||||
// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
|
||||
func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool {
|
||||
if volume.Spec.ClaimRef == nil {
|
||||
return false
|
||||
}
|
||||
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
|
||||
return false
|
||||
}
|
||||
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// FindMatchingVolume goes through the list of volumes to find the best matching volume
|
||||
// for the claim.
|
||||
//
|
||||
// This function is used by both the PV controller and scheduler.
|
||||
@ -122,7 +157,7 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol
|
||||
// excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple
|
||||
// unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen
|
||||
// PV needs to be excluded from future matching.
|
||||
func findMatchingVolume(
|
||||
func FindMatchingVolume(
|
||||
claim *v1.PersistentVolumeClaim,
|
||||
volumes []*v1.PersistentVolume,
|
||||
node *v1.Node,
|
||||
@ -159,7 +194,7 @@ func findMatchingVolume(
|
||||
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
|
||||
|
||||
// check if volumeModes do not match (feature gate protected)
|
||||
isMismatch, err := checkVolumeModeMismatches(&claim.Spec, &volume.Spec)
|
||||
isMismatch, err := CheckVolumeModeMismatches(&claim.Spec, &volume.Spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
|
||||
}
|
||||
@ -237,7 +272,7 @@ func findMatchingVolume(
|
||||
if node != nil {
|
||||
// Scheduler path
|
||||
// Check that the access modes match
|
||||
if !checkAccessModes(claim, volume) {
|
||||
if !CheckAccessModes(claim, volume) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
@ -258,9 +293,9 @@ func findMatchingVolume(
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// checkVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume
|
||||
// CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume
|
||||
// and PersistentVolumeClaims
|
||||
func checkVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
|
||||
func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
|
||||
return false, nil
|
||||
}
|
||||
@ -278,93 +313,8 @@ func checkVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1
|
||||
return requestedVolumeMode != pvVolumeMode, nil
|
||||
}
|
||||
|
||||
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.PersistentVolumeClaim, delayBinding bool) (*v1.PersistentVolume, error) {
|
||||
return pvIndex.findByClaim(claim, delayBinding)
|
||||
}
|
||||
|
||||
// allPossibleMatchingAccessModes returns an array of AccessMode arrays that
|
||||
// can satisfy a user's requested modes.
|
||||
//
|
||||
// see comments in the Find func above regarding indexing.
|
||||
//
|
||||
// allPossibleMatchingAccessModes gets all stringified accessmodes from the
|
||||
// index and returns all those that contain at least all of the requested
|
||||
// mode.
|
||||
//
|
||||
// For example, assume the index contains 2 types of PVs where the stringified
|
||||
// accessmodes are:
|
||||
//
|
||||
// "RWO,ROX" -- some number of GCEPDs
|
||||
// "RWO,ROX,RWX" -- some number of NFS volumes
|
||||
//
|
||||
// A request for RWO could be satisfied by both sets of indexed volumes, so
|
||||
// allPossibleMatchingAccessModes returns:
|
||||
//
|
||||
// [][]v1.PersistentVolumeAccessMode {
|
||||
// []v1.PersistentVolumeAccessMode {
|
||||
// v1.ReadWriteOnce, v1.ReadOnlyMany,
|
||||
// },
|
||||
// []v1.PersistentVolumeAccessMode {
|
||||
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// A request for RWX can be satisfied by only one set of indexed volumes, so
|
||||
// the return is:
|
||||
//
|
||||
// [][]v1.PersistentVolumeAccessMode {
|
||||
// []v1.PersistentVolumeAccessMode {
|
||||
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// This func returns modes with ascending levels of modes to give the user
|
||||
// what is closest to what they actually asked for.
|
||||
func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []v1.PersistentVolumeAccessMode) [][]v1.PersistentVolumeAccessMode {
|
||||
matchedModes := [][]v1.PersistentVolumeAccessMode{}
|
||||
keys := pvIndex.store.ListIndexFuncValues("accessmodes")
|
||||
for _, key := range keys {
|
||||
indexedModes := v1helper.GetAccessModesFromString(key)
|
||||
if volumeutil.AccessModesContainedInAll(indexedModes, requestedModes) {
|
||||
matchedModes = append(matchedModes, indexedModes)
|
||||
}
|
||||
}
|
||||
|
||||
// sort by the number of modes in each array with the fewest number of
|
||||
// modes coming first. this allows searching for volumes by the minimum
|
||||
// number of modes required of the possible matches.
|
||||
sort.Sort(byAccessModes{matchedModes})
|
||||
return matchedModes
|
||||
}
|
||||
|
||||
// byAccessModes is used to order access modes by size, with the fewest modes first
|
||||
type byAccessModes struct {
|
||||
modes [][]v1.PersistentVolumeAccessMode
|
||||
}
|
||||
|
||||
func (c byAccessModes) Less(i, j int) bool {
|
||||
return len(c.modes[i]) < len(c.modes[j])
|
||||
}
|
||||
|
||||
func (c byAccessModes) Swap(i, j int) {
|
||||
c.modes[i], c.modes[j] = c.modes[j], c.modes[i]
|
||||
}
|
||||
|
||||
func (c byAccessModes) Len() int {
|
||||
return len(c.modes)
|
||||
}
|
||||
|
||||
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
|
||||
}
|
||||
|
||||
func claimrefToClaimKey(claimref *v1.ObjectReference) string {
|
||||
return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name)
|
||||
}
|
||||
|
||||
// Returns true if PV satisfies all the PVC's requested AccessModes
|
||||
func checkAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool {
|
||||
// CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes
|
||||
func CheckAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool {
|
||||
pvModesMap := map[v1.PersistentVolumeAccessMode]bool{}
|
||||
for _, mode := range volume.Spec.AccessModes {
|
||||
pvModesMap[mode] = true
|
||||
@ -378,3 +328,26 @@ func checkAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolu
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
|
||||
}
|
||||
|
||||
// GetVolumeNodeAffinity returns a VolumeNodeAffinity for given key and value.
|
||||
func GetVolumeNodeAffinity(key string, value string) *v1.VolumeNodeAffinity {
|
||||
return &v1.VolumeNodeAffinity{
|
||||
Required: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: key,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{value},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
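With findMatchingVolume, checkVolumeModeMismatches and checkAccessModes exported in this hunk, callers outside the package (for example the scheduler's volume binder) can reuse the matching logic directly. A hedged sketch of such a call; pickVolume is made up, and the types of the truncated trailing parameters are assumed from the nil arguments at the old in-package call site (no node for topology binding, no exclusion map):

package example

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
)

// pickVolume is a hypothetical out-of-package caller of the newly exported
// matcher. It asks for the best PV among the candidates for the given claim.
func pickVolume(claim *v1.PersistentVolumeClaim, candidates []*v1.PersistentVolume, delayBinding bool) (*v1.PersistentVolume, error) {
	return persistentvolume.FindMatchingVolume(claim, candidates, nil /* node */, nil /* excluded volumes */, delayBinding)
}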
138  vendor/k8s.io/kubernetes/pkg/controller/volume/persistentvolume/volume_host.go (generated, vendored)
@ -1,138 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/subpath"
|
||||
)
|
||||
|
||||
// VolumeHost interface implementation for PersistentVolumeController.
|
||||
|
||||
var _ vol.VolumeHost = &PersistentVolumeController{}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetVolumeDevicePluginDir(pluginName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPodsDir() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
|
||||
return ctrl.kubeClient
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *v1.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
|
||||
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
|
||||
return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface {
	return ctrl.cloud
}

func (ctrl *PersistentVolumeController) GetMounter(pluginName string) mount.Interface {
	return nil
}

func (ctrl *PersistentVolumeController) GetHostName() string {
	return ""
}

func (ctrl *PersistentVolumeController) GetHostIP() (net.IP, error) {
	return nil, fmt.Errorf("PersistentVolumeController.GetHostIP() is not implemented")
}

func (ctrl *PersistentVolumeController) GetNodeAllocatable() (v1.ResourceList, error) {
	return v1.ResourceList{}, nil
}

func (ctrl *PersistentVolumeController) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
	return func(_, _ string) (*v1.Secret, error) {
		return nil, fmt.Errorf("GetSecret unsupported in PersistentVolumeController")
	}
}

func (ctrl *PersistentVolumeController) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
	return func(_, _ string) (*v1.ConfigMap, error) {
		return nil, fmt.Errorf("GetConfigMap unsupported in PersistentVolumeController")
	}
}

func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
	return func(_, _ string, _ *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
		return nil, fmt.Errorf("GetServiceAccountToken unsupported in PersistentVolumeController")
	}
}

func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
	return func(types.UID) {
		klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController")
	}
}

func (ctrl *PersistentVolumeController) GetExec(pluginName string) mount.Exec {
	return mount.NewOsExec()
}

func (ctrl *PersistentVolumeController) GetNodeLabels() (map[string]string, error) {
	return nil, fmt.Errorf("GetNodeLabels() unsupported in PersistentVolumeController")
}

func (ctrl *PersistentVolumeController) GetNodeName() types.NodeName {
	return ""
}

func (ctrl *PersistentVolumeController) GetEventRecorder() record.EventRecorder {
	return ctrl.eventRecorder
}

func (ctrl *PersistentVolumeController) GetSubpather() subpath.Interface {
	// No volume plugin needs Subpaths in PV controller.
	return nil
}
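The file above stubs out most VolumeHost methods because the PV controller never mounts volumes itself; the `var _ vol.VolumeHost = &PersistentVolumeController{}` line is a compile-time interface check. Below is a minimal, self-contained sketch of that pattern; the `Host` interface and `controller` type are illustrative stand-ins, not the vendored types.

package main

import "fmt"

// Host is a stand-in for an interface such as vol.VolumeHost.
type Host interface {
	GetPluginDir(pluginName string) string
}

// controller is a stand-in for PersistentVolumeController; the method is a
// stub because this controller never runs volume plugins directly.
type controller struct{}

func (c *controller) GetPluginDir(pluginName string) string { return "" }

// This blank-identifier assignment fails to compile if *controller ever
// stops satisfying Host, catching interface drift at build time.
var _ Host = &controller{}

func main() {
	var h Host = &controller{}
	fmt.Printf("plugin dir: %q\n", h.GetPluginDir("example.com/fake-plugin"))
}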
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume
package scheduling

import (
	"fmt"
@ -127,7 +127,8 @@ func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
	return c.indexFunc(objInfo.latestObj)
}

func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) *assumeCache {
// NewAssumeCache creates an assume cache for general objects.
func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache {
	c := &assumeCache{
		description: description,
		indexFunc:   indexFunc,
@ -344,7 +345,7 @@ type PVAssumeCache interface {
}

type pvAssumeCache struct {
	*assumeCache
	AssumeCache
}

func pvStorageClassIndexFunc(obj interface{}) ([]string, error) {
@ -354,8 +355,9 @@ func pvStorageClassIndexFunc(obj interface{}) ([]string, error) {
	return []string{""}, fmt.Errorf("object is not a v1.PersistentVolume: %v", obj)
}

// NewPVAssumeCache creates a PV assume cache.
func NewPVAssumeCache(informer cache.SharedIndexInformer) PVAssumeCache {
	return &pvAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc)}
	return &pvAssumeCache{NewAssumeCache(informer, "v1.PersistentVolume", "storageclass", pvStorageClassIndexFunc)}
}

func (c *pvAssumeCache) GetPV(pvName string) (*v1.PersistentVolume, error) {
@ -411,11 +413,12 @@ type PVCAssumeCache interface {
}

type pvcAssumeCache struct {
	*assumeCache
	AssumeCache
}

// NewPVCAssumeCache creates a PVC assume cache.
func NewPVCAssumeCache(informer cache.SharedIndexInformer) PVCAssumeCache {
	return &pvcAssumeCache{assumeCache: NewAssumeCache(informer, "v1.PersistentVolumeClaim", "namespace", cache.MetaNamespaceIndexFunc)}
	return &pvcAssumeCache{NewAssumeCache(informer, "v1.PersistentVolumeClaim", "namespace", cache.MetaNamespaceIndexFunc)}
}

func (c *pvcAssumeCache) GetPVC(pvcKey string) (*v1.PersistentVolumeClaim, error) {
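The hunks above switch `pvAssumeCache` and `pvcAssumeCache` from embedding the concrete `*assumeCache` to embedding the `AssumeCache` interface, with `NewAssumeCache` now returning that interface. A rough sketch of the same embedding pattern, using hypothetical names rather than the vendored types:

package main

import "fmt"

// Cache is a stand-in for the AssumeCache interface.
type Cache interface {
	Get(key string) (interface{}, bool)
}

// memCache is a stand-in for the unexported concrete assumeCache type.
type memCache struct{ data map[string]interface{} }

func (c *memCache) Get(key string) (interface{}, bool) {
	v, ok := c.data[key]
	return v, ok
}

// NewCache returns the interface, mirroring NewAssumeCache returning AssumeCache.
func NewCache() Cache {
	return &memCache{data: map[string]interface{}{"pv-1": "10Gi"}}
}

// pvCache embeds the interface instead of the concrete pointer, so any Cache
// implementation (including a fake in tests) can be plugged in.
type pvCache struct {
	Cache
}

func main() {
	c := pvCache{Cache: NewCache()}
	v, ok := c.Get("pv-1")
	fmt.Println(v, ok)
}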
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume
package scheduling

import (
	"github.com/prometheus/client_golang/prometheus"
@ -24,6 +24,7 @@ import (
const VolumeSchedulerSubsystem = "scheduler_volume"

var (
	// VolumeBindingRequestSchedulerBinderCache tracks the number of volume binder cache operations.
	VolumeBindingRequestSchedulerBinderCache = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: VolumeSchedulerSubsystem,
@ -32,6 +33,7 @@ var (
		},
		[]string{"operation"},
	)
	// VolumeSchedulingStageLatency tracks the latency of volume scheduling operations.
	VolumeSchedulingStageLatency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: VolumeSchedulerSubsystem,
@ -41,6 +43,7 @@ var (
		},
		[]string{"operation"},
	)
	// VolumeSchedulingStageFailed tracks the number of failed volume scheduling operations.
	VolumeSchedulingStageFailed = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: VolumeSchedulerSubsystem,
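The added comments above document the scheduler volume metrics. As a reference for how such labeled vectors are typically registered and updated with client_golang, here is a small self-contained sketch; the metric names are illustrative, not the vendored ones.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Counts binder cache operations, keyed by an "operation" label.
	binderCacheOps = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Subsystem: "scheduler_volume",
			Name:      "binder_cache_requests_total",
			Help:      "Total number of binder cache operations.",
		},
		[]string{"operation"},
	)
	// Observes how long each scheduling stage takes, in seconds.
	stageLatency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Subsystem: "scheduler_volume",
			Name:      "scheduling_stage_duration_seconds",
			Help:      "Latency of volume scheduling stages.",
			Buckets:   prometheus.DefBuckets,
		},
		[]string{"operation"},
	)
)

func main() {
	prometheus.MustRegister(binderCacheOps, stageLatency)

	start := time.Now()
	binderCacheOps.WithLabelValues("assume").Inc()
	stageLatency.WithLabelValues("assume").Observe(time.Since(start).Seconds())
}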
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume
package scheduling

import (
	"fmt"
@ -32,6 +32,7 @@ import (
	storagelisters "k8s.io/client-go/listers/storage/v1"
	"k8s.io/klog"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

@ -139,15 +140,6 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
	return b.podBindingCache
}

func podHasClaims(pod *v1.Pod) bool {
	for _, vol := range pod.Spec.Volumes {
		if vol.PersistentVolumeClaim != nil {
			return true
		}
	}
	return false
}

// FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
// This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
// That's necessary because some operations will need to pass in to the predicate fake node objects.
@ -168,32 +160,27 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
		}
	}()

	if !podHasClaims(pod) {
		// Fast path
		return unboundVolumesSatisfied, boundVolumesSatisfied, nil
	}

	var (
		matchedClaims []*bindingInfo
		matchedBindings   []*bindingInfo
		provisionedClaims []*v1.PersistentVolumeClaim
	)
	defer func() {
		// We recreate bindings for each new schedule loop.
		if len(matchedClaims) == 0 && len(provisionedClaims) == 0 {
		if len(matchedBindings) == 0 && len(provisionedClaims) == 0 {
			// Clear cache if no claims to bind or provision for this node.
			b.podBindingCache.ClearBindings(pod, node.Name)
			return
		}
		// Although we do not distinguish nil from empty in this function, for
		// easier testing, we normalize empty to nil.
		if len(matchedClaims) == 0 {
			matchedClaims = nil
		if len(matchedBindings) == 0 {
			matchedBindings = nil
		}
		if len(provisionedClaims) == 0 {
			provisionedClaims = nil
		}
		// Mark cache with all matched and provisioned claims for this node
		b.podBindingCache.UpdateBindings(pod, node.Name, matchedClaims, provisionedClaims)
		b.podBindingCache.UpdateBindings(pod, node.Name, matchedBindings, provisionedClaims)
	}()

	// The pod's volumes need to be processed in one call to avoid the race condition where
@ -225,7 +212,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume

	// Filter out claims to provision
	for _, claim := range claimsToBind {
		if selectedNode, ok := claim.Annotations[annSelectedNode]; ok {
		if selectedNode, ok := claim.Annotations[pvutil.AnnSelectedNode]; ok {
			if selectedNode != node.Name {
				// Fast path, skip unmatched node
				return false, boundVolumesSatisfied, nil
@ -239,7 +226,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
	// Find matching volumes
	if len(claimsToFindMatching) > 0 {
		var unboundClaims []*v1.PersistentVolumeClaim
		unboundVolumesSatisfied, matchedClaims, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
		unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
		if err != nil {
			return false, false, err
		}
@ -288,7 +275,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
	// Assume PV
	newBindings := []*bindingInfo{}
	for _, binding := range claimsToBind {
		newPV, dirty, err := GetBindVolumeToClaim(binding.pv, binding.pvc)
		newPV, dirty, err := pvutil.GetBindVolumeToClaim(binding.pv, binding.pvc)
		klog.V(5).Infof("AssumePodVolumes: GetBindVolumeToClaim for pod %q, PV %q, PVC %q. newPV %p, dirty %v, err: %v",
			podName,
			binding.pv.Name,
@ -317,7 +304,7 @@ func (b *volumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (al
		// The claims from method args can be pointing to watcher cache. We must not
		// modify these, therefore create a copy.
		claimClone := claim.DeepCopy()
		metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, nodeName)
		metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, pvutil.AnnSelectedNode, nodeName)
		err = b.pvcCache.Assume(claimClone)
		if err != nil {
			b.revertAssumedPVs(newBindings)
@ -410,14 +397,14 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
		// TODO: does it hurt if we make an api call and nothing needs to be updated?
		claimKey := claimToClaimKey(binding.pvc)
		klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
		if newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv); err != nil {
		newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv)
		if err != nil {
			klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
			return err
		} else {
			klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey)
			// Save updated object from apiserver for later checking.
			binding.pv = newPV
		}
		klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", binding.pv.Name, claimKey)
		// Save updated object from apiserver for later checking.
		binding.pv = newPV
		lastProcessedBinding++
	}

@ -425,12 +412,12 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
	// PV controller is expected to signal back by removing related annotations if actual provisioning fails
	for i, claim = range claimsToProvision {
		klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
		if newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim); err != nil {
		newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim)
		if err != nil {
			return err
		} else {
			// Save updated object from apiserver for later checking.
			claimsToProvision[i] = newClaim
		}
		// Save updated object from apiserver for later checking.
		claimsToProvision[i] = newClaim
		lastProcessedProvisioning++
	}

@ -525,7 +512,7 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim
		if pvc.Annotations == nil {
			return false, fmt.Errorf("selectedNode annotation reset for PVC %q", pvc.Name)
		}
		selectedNode := pvc.Annotations[annSelectedNode]
		selectedNode := pvc.Annotations[pvutil.AnnSelectedNode]
		if selectedNode != pod.Spec.NodeName {
			return false, fmt.Errorf("selectedNode annotation value %q not set to scheduled node %q", selectedNode, pod.Spec.NodeName)
		}
@ -540,9 +527,8 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*bindingInfo, claim
				// And if PV does not exist because it's deleted, PVC will
				// be unbound eventually.
				return false, nil
			} else {
				return false, fmt.Errorf("failed to get pv %q from cache: %v", pvc.Spec.VolumeName, err)
			}
			return false, fmt.Errorf("failed to get pv %q from cache: %v", pvc.Spec.VolumeName, err)
		}
		if err := volumeutil.CheckNodeAffinity(pv, node.Labels); err != nil {
			return false, fmt.Errorf("pv %q node affinity doesn't match node %q: %v", pv.Name, node.Name, err)
@ -596,7 +582,7 @@ func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.Persiste
}

func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool {
	return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, annBindCompleted)
	return pvc.Spec.VolumeName != "" && metav1.HasAnnotation(pvc.ObjectMeta, pvutil.AnnBindCompleted)
}

// arePodVolumesBound returns true if all volumes are fully bound
@ -612,10 +598,10 @@ func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {

// getPodVolumes returns a pod's PVCs separated into bound, unbound with delayed binding (including provisioning)
// and unbound with immediate binding (including prebound)
func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaims []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentVolumeClaim, unboundClaimsDelayBinding []*v1.PersistentVolumeClaim, unboundClaimsImmediate []*v1.PersistentVolumeClaim, err error) {
	boundClaims = []*v1.PersistentVolumeClaim{}
	unboundClaimsImmediate = []*v1.PersistentVolumeClaim{}
	unboundClaims = []*v1.PersistentVolumeClaim{}
	unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{}

	for _, vol := range pod.Spec.Volumes {
		volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
@ -628,14 +614,14 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
		if volumeBound {
			boundClaims = append(boundClaims, pvc)
		} else {
			delayBindingMode, err := IsDelayBindingMode(pvc, b.classLister)
			delayBindingMode, err := pvutil.IsDelayBindingMode(pvc, b.classLister)
			if err != nil {
				return nil, nil, nil, err
			}
			// Prebound PVCs are treated as unbound immediate binding
			if delayBindingMode && pvc.Spec.VolumeName == "" {
				// Scheduler path
				unboundClaims = append(unboundClaims, pvc)
				unboundClaimsDelayBinding = append(unboundClaimsDelayBinding, pvc)
			} else {
				// !delayBindingMode || pvc.Spec.VolumeName != ""
				// Immediate binding should have already been bound
@ -643,7 +629,7 @@ func (b *volumeBinder) getPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
			}
		}
	}
	return boundClaims, unboundClaims, unboundClaimsImmediate, nil
	return boundClaims, unboundClaimsDelayBinding, unboundClaimsImmediate, nil
}

func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node *v1.Node, podName string) (bool, error) {
@ -668,7 +654,7 @@ func (b *volumeBinder) checkBoundClaims(claims []*v1.PersistentVolumeClaim, node

// findMatchingVolumes tries to find matching volumes for given claims,
// and return unbound claims for further provision.
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, matchedClaims []*bindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.PersistentVolumeClaim, node *v1.Node) (foundMatches bool, bindings []*bindingInfo, unboundClaims []*v1.PersistentVolumeClaim, err error) {
	podName := getPodName(pod)
	// Sort all the claims by increasing size request to get the smallest fits
	sort.Sort(byPVCSize(claimsToBind))
@ -676,7 +662,6 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
	chosenPVs := map[string]*v1.PersistentVolume{}

	foundMatches = true
	matchedClaims = []*bindingInfo{}

	for _, pvc := range claimsToBind {
		// Get storage class name from each PVC
@ -689,7 +674,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi
		pvcName := getPVCName(pvc)

		// Find a matching PV
		pv, err := findMatchingVolume(pvc, allPVs, node, chosenPVs, true)
		pv, err := pvutil.FindMatchingVolume(pvc, allPVs, node, chosenPVs, true)
		if err != nil {
			return false, nil, nil, err
		}
@ -702,7 +687,7 @@ func (b *volumeBinder) findMatchingVolumes(pod *v1.Pod, claimsToBind []*v1.Persi

		// matching PV needs to be excluded so we don't select it again
		chosenPVs[pv.Name] = pv
		matchedClaims = append(matchedClaims, &bindingInfo{pv: pv, pvc: pvc})
		bindings = append(bindings, &bindingInfo{pv: pv, pvc: pvc})
		klog.V(5).Infof("Found matching PV %q for PVC %q on node %q for pod %q", pv.Name, pvcName, node.Name, podName)
	}

@ -732,7 +717,7 @@ func (b *volumeBinder) checkVolumeProvisions(pod *v1.Pod, claimsToProvision []*v
			return false, nil, fmt.Errorf("failed to find storage class %q", className)
		}
		provisioner := class.Provisioner
		if provisioner == "" || provisioner == notSupportedProvisioner {
		if provisioner == "" || provisioner == pvutil.NotSupportedProvisioner {
			klog.V(4).Infof("storage class %q of claim %q does not support dynamic provisioning", className, pvcName)
			return false, nil, nil
		}
@ -790,3 +775,7 @@ func (a byPVCSize) Less(i, j int) bool {
	// return true if iSize is less than jSize
	return iSize.Cmp(jSize) == -1
}

func claimToClaimKey(claim *v1.PersistentVolumeClaim) string {
	return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
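A recurring change in `bindAPIUpdate` above replaces the `if x, err := call(); err != nil { ... } else { ... }` form with a plain assignment followed by an error check, so the returned object stays in scope without an `else` branch. A minimal sketch of the resulting shape, with a hypothetical `updatePV` helper standing in for the API call:

package main

import (
	"errors"
	"fmt"
)

// updatePV is a hypothetical stand-in for an API update call that returns
// the object as stored by the server.
func updatePV(name string) (string, error) {
	if name == "" {
		return "", errors.New("empty name")
	}
	return name + "-updated", nil
}

func main() {
	// After the refactor: assign first, then check the error. newPV remains
	// in scope for the code that follows, so no else branch is needed.
	newPV, err := updatePV("pv-1")
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	// Save the updated object for later checking, as the binder does.
	fmt.Println("bound:", newPV)
}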
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume
package scheduling

import (
	"sync"
@ -22,7 +22,7 @@ import (
	"k8s.io/api/core/v1"
)

// podBindingCache stores PV binding decisions per pod per node.
// PodBindingCache stores PV binding decisions per pod per node.
// Pod entries are removed when the Pod is deleted or updated to
// no longer be schedulable.
type PodBindingCache interface {
@ -69,6 +69,7 @@ type nodeDecision struct {
	provisionings []*v1.PersistentVolumeClaim
}

// NewPodBindingCache creates a pod binding cache.
func NewPodBindingCache() PodBindingCache {
	return &podBindingCache{bindingDecisions: map[string]nodeDecisions{}}
}
@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume
package scheduling

import "k8s.io/api/core/v1"

// FakeVolumeBinderConfig holds configurations for fake volume binder.
type FakeVolumeBinderConfig struct {
	AllBound             bool
	FindUnboundSatsified bool
@ -27,7 +28,7 @@ type FakeVolumeBinderConfig struct {
	BindErr error
}

// NewVolumeBinder sets up all the caches needed for the scheduler to make
// NewFakeVolumeBinder sets up all the caches needed for the scheduler to make
// topology-aware volume binding decisions.
func NewFakeVolumeBinder(config *FakeVolumeBinderConfig) *FakeVolumeBinder {
	return &FakeVolumeBinder{
@ -35,26 +36,31 @@ func NewFakeVolumeBinder(config *FakeVolumeBinderConfig) *FakeVolumeBinder {
	}
}

// FakeVolumeBinder represents a fake volume binder for testing.
type FakeVolumeBinder struct {
	config       *FakeVolumeBinderConfig
	AssumeCalled bool
	BindCalled   bool
}

// FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) {
	return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
}

// AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.
func (b *FakeVolumeBinder) AssumePodVolumes(assumedPod *v1.Pod, nodeName string) (bool, error) {
	b.AssumeCalled = true
	return b.config.AllBound, b.config.AssumeErr
}

// BindPodVolumes implements SchedulerVolumeBinder.BindPodVolumes.
func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
	b.BindCalled = true
	return b.config.BindErr
}

// GetBindingsCache implements SchedulerVolumeBinder.GetBindingsCache.
func (b *FakeVolumeBinder) GetBindingsCache() PodBindingCache {
	return nil
}
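The FakeVolumeBinder above is a test double: it records that a call happened and returns whatever its config dictates. A stripped-down, self-contained sketch of the same pattern, with hypothetical names rather than the vendored types:

package main

import "fmt"

// binderConfig mirrors the idea of FakeVolumeBinderConfig: canned results.
type binderConfig struct {
	allBound  bool
	assumeErr error
}

// fakeBinder mirrors FakeVolumeBinder: it records the call and returns the
// configured answers so tests can assert on both.
type fakeBinder struct {
	config       binderConfig
	assumeCalled bool
}

func (b *fakeBinder) AssumePodVolumes(pod, node string) (bool, error) {
	b.assumeCalled = true
	return b.config.allBound, b.config.assumeErr
}

func main() {
	b := &fakeBinder{config: binderConfig{allBound: true}}
	allBound, err := b.AssumePodVolumes("default/web-0", "node-a")
	fmt.Println(allBound, err, b.assumeCalled)
}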