Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 02:43:36 +00:00
Changes to accommodate client-go changes and kube vendor update to v1.18.0

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>

Committed by mergify[bot]
Parent 4c96ad3c85, commit 34fc1d847e
vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/BUILD (generated, vendored): 1 change
@@ -45,7 +45,6 @@ go_test(
     ],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/api/testapi:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/volume/persistentvolume/testing:go_default_library",
         "//pkg/controller/volume/persistentvolume/util:go_default_library",
vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_assume_cache.go (generated, vendored): 2 changes
@@ -127,7 +127,7 @@ func (c *assumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
 	return c.indexFunc(objInfo.latestObj)
 }
 
-// NewAssumeCache creates an assume cache for genernal objects.
+// NewAssumeCache creates an assume cache for general objects.
 func NewAssumeCache(informer cache.SharedIndexInformer, description, indexName string, indexFunc cache.IndexFunc) AssumeCache {
 	c := &assumeCache{
 		description: description,
vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go (generated, vendored): 79 changes
@@ -17,6 +17,7 @@ limitations under the License.
 package scheduling
 
 import (
+	"context"
 	"fmt"
 	"sort"
 	"strings"
@@ -44,6 +45,24 @@ import (
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
+// ConflictReason is used for the special strings which explain why
+// volume binding is impossible for a node.
+type ConflictReason string
+
+// ConflictReasons contains all reasons that explain why volume binding is impossible for a node.
+type ConflictReasons []ConflictReason
+
+func (reasons ConflictReasons) Len() int           { return len(reasons) }
+func (reasons ConflictReasons) Less(i, j int) bool { return reasons[i] < reasons[j] }
+func (reasons ConflictReasons) Swap(i, j int)      { reasons[i], reasons[j] = reasons[j], reasons[i] }
+
+const (
+	// ErrReasonBindConflict is used for VolumeBindingNoMatch predicate error.
+	ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind"
+	// ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error.
+	ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict"
+)
+
 // InTreeToCSITranslator contains methods required to check migratable status
 // and perform translations from InTree PV's to CSI
 type InTreeToCSITranslator interface {
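For context: the three one-line methods added above give ConflictReasons a sort.Interface implementation, so callers can order the reasons deterministically before joining them into a scheduler message. A minimal, self-contained sketch; the type definitions are copied from the hunk, while the main function is illustrative and not part of this commit:

	package main

	import (
		"fmt"
		"sort"
		"strings"
	)

	type ConflictReason string
	type ConflictReasons []ConflictReason

	func (r ConflictReasons) Len() int           { return len(r) }
	func (r ConflictReasons) Less(i, j int) bool { return r[i] < r[j] }
	func (r ConflictReasons) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }

	func main() {
		reasons := ConflictReasons{
			"node(s) had volume node affinity conflict",
			"node(s) didn't find available persistent volumes to bind",
		}
		// Sorting gives stable output regardless of the order in which
		// the binder appended the reasons.
		sort.Sort(reasons)
		parts := make([]string, 0, len(reasons))
		for _, r := range reasons {
			parts = append(parts, string(r))
		}
		fmt.Println(strings.Join(parts, "; "))
	}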
@@ -82,11 +101,11 @@ type SchedulerVolumeBinder interface {
 	// If a PVC is bound, it checks if the PV's NodeAffinity matches the Node.
 	// Otherwise, it tries to find an available PV to bind to the PVC.
 	//
-	// It returns true if all of the Pod's PVCs have matching PVs or can be dynamic provisioned,
-	// and returns true if bound volumes satisfy the PV NodeAffinity.
+	// It returns an error when something went wrong or a list of reasons why the node is
+	// (currently) not usable for the pod.
 	//
 	// This function is called by the volume binding scheduler predicate and can be called in parallel
-	FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisified, boundVolumesSatisfied bool, err error)
+	FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error)
 
 	// AssumePodVolumes will:
 	// 1. Take the PV matches for unbound PVCs and update the PV cache assuming
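The interface change collapses two booleans into one reason list, so every caller needs a small adaptation. A hedged sketch of such an adapter, assuming the vendored import path; the helper name nodeFitsPodVolumes is hypothetical and not part of this commit:

	package example

	import (
		v1 "k8s.io/api/core/v1"
		"k8s.io/kubernetes/pkg/controller/volume/scheduling"
	)

	// nodeFitsPodVolumes maps the new ConflictReasons result back onto the
	// old boolean-style answer: an empty reason list corresponds to both of
	// the former return values being true.
	func nodeFitsPodVolumes(binder scheduling.SchedulerVolumeBinder, pod *v1.Pod, node *v1.Node) (bool, scheduling.ConflictReasons, error) {
		reasons, err := binder.FindPodVolumes(pod, node)
		if err != nil {
			// An error means the check itself failed, not that the node conflicts.
			return false, nil, err
		}
		return len(reasons) == 0, reasons, nil
	}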
@@ -112,6 +131,9 @@ type SchedulerVolumeBinder interface {
 
 	// GetBindingsCache returns the cache used (if any) to store volume binding decisions.
 	GetBindingsCache() PodBindingCache
+
+	// DeletePodBindings will delete pod's bindingDecisions in podBindingCache.
+	DeletePodBindings(pod *v1.Pod)
 }
 
 type volumeBinder struct {
@@ -162,18 +184,40 @@ func (b *volumeBinder) GetBindingsCache() PodBindingCache {
 	return b.podBindingCache
 }
 
+// DeletePodBindings will delete pod's bindingDecisions in podBindingCache.
+func (b *volumeBinder) DeletePodBindings(pod *v1.Pod) {
+	cache := b.podBindingCache
+	if pod != nil {
+		cache.DeleteBindings(pod)
+	}
+}
+
 // FindPodVolumes caches the matching PVs and PVCs to provision per node in podBindingCache.
 // This method intentionally takes in a *v1.Node object instead of using volumebinder.nodeInformer.
 // That's necessary because some operations will need to pass in to the predicate fake node objects.
-func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatisfied bool, err error) {
+func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
 	podName := getPodName(pod)
 
 	// Warning: Below log needs high verbosity as it can be printed several times (#60933).
 	klog.V(5).Infof("FindPodVolumes for pod %q, node %q", podName, node.Name)
 
-	// Initialize to true for pods that don't have volumes
-	unboundVolumesSatisfied = true
-	boundVolumesSatisfied = true
+	// Initialize to true for pods that don't have volumes. These
+	// booleans get translated into reason strings when the function
+	// returns without an error.
+	unboundVolumesSatisfied := true
+	boundVolumesSatisfied := true
+	defer func() {
+		if err != nil {
+			return
+		}
+		if !boundVolumesSatisfied {
+			reasons = append(reasons, ErrReasonNodeConflict)
+		}
+		if !unboundVolumesSatisfied {
+			reasons = append(reasons, ErrReasonBindConflict)
+		}
+	}()
 
 	start := time.Now()
 	defer func() {
 		metrics.VolumeSchedulingStageLatency.WithLabelValues("predicate").Observe(time.Since(start).Seconds())
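The deferred closure above relies on a general Go idiom: a defer may rewrite named result parameters after the function body has chosen its return values, which is how the two internal booleans become reason strings. A standalone illustration of the idiom, not from this commit:

	package main

	import "fmt"

	// check mimics the shape of the rewritten FindPodVolumes: booleans are
	// computed in the body, and a deferred closure converts them into the
	// named result `reasons` just before returning.
	func check(ok1, ok2 bool) (reasons []string, err error) {
		defer func() {
			if err != nil {
				return // on error, leave reasons nil, as FindPodVolumes does
			}
			if !ok1 {
				reasons = append(reasons, "first check failed")
			}
			if !ok2 {
				reasons = append(reasons, "second check failed")
			}
		}()
		return // bare return: the deferred closure fills in reasons
	}

	func main() {
		reasons, _ := check(true, false)
		fmt.Println(reasons) // prints: [second check failed]
	}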
@@ -209,19 +253,19 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 	// volumes can get bound/provisioned in between calls.
 	boundClaims, claimsToBind, unboundClaimsImmediate, err := b.getPodVolumes(pod)
 	if err != nil {
-		return false, false, err
+		return nil, err
 	}
 
 	// Immediate claims should be bound
 	if len(unboundClaimsImmediate) > 0 {
-		return false, false, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
+		return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims")
 	}
 
 	// Check PV node affinity on bound volumes
 	if len(boundClaims) > 0 {
 		boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
 		if err != nil {
-			return false, false, err
+			return nil, err
 		}
 	}
 
@@ -236,8 +280,9 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 	for _, claim := range claimsToBind {
 		if selectedNode, ok := claim.Annotations[pvutil.AnnSelectedNode]; ok {
 			if selectedNode != node.Name {
-				// Fast path, skip unmatched node
-				return false, boundVolumesSatisfied, nil
+				// Fast path, skip unmatched node.
+				unboundVolumesSatisfied = false
+				return
 			}
 			claimsToProvision = append(claimsToProvision, claim)
 		} else {
@@ -250,7 +295,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 		var unboundClaims []*v1.PersistentVolumeClaim
 		unboundVolumesSatisfied, matchedBindings, unboundClaims, err = b.findMatchingVolumes(pod, claimsToFindMatching, node)
 		if err != nil {
-			return false, false, err
+			return nil, err
 		}
 		claimsToProvision = append(claimsToProvision, unboundClaims...)
 	}
@@ -259,12 +304,12 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
 		if len(claimsToProvision) > 0 {
 			unboundVolumesSatisfied, provisionedClaims, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
 			if err != nil {
-				return false, false, err
+				return nil, err
 			}
 		}
 	}
 
-	return unboundVolumesSatisfied, boundVolumesSatisfied, nil
+	return
 }
 
 // AssumePodVolumes will take the cached matching PVs and PVCs to provision
@@ -423,7 +468,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
 		// TODO: does it hurt if we make an api call and nothing needs to be updated?
 		claimKey := claimToClaimKey(binding.pvc)
 		klog.V(2).Infof("claim %q bound to volume %q", claimKey, binding.pv.Name)
-		newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(binding.pv)
+		newPV, err := b.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), binding.pv, metav1.UpdateOptions{})
 		if err != nil {
 			klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", binding.pv.Name, claimKey, err)
 			return err
@@ -438,7 +483,7 @@ func (b *volumeBinder) bindAPIUpdate(podName string, bindings []*bindingInfo, cl
 	// PV controller is expect to signal back by removing related annotations if actual provisioning fails
 	for i, claim = range claimsToProvision {
 		klog.V(5).Infof("bindAPIUpdate: Pod %q, PVC %q", podName, getPVCName(claim))
-		newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(claim)
+		newClaim, err := b.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claim, metav1.UpdateOptions{})
 		if err != nil {
 			return err
 		}
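These two hunks are the mechanical client-go part of the vendor bump: as of client-go v0.18 (Kubernetes 1.18), mutating verbs take a leading context.Context and a trailing typed options struct. A hedged sketch of the new call shape; the helper updatePV and its parameters are illustrative, not from this commit:

	package example

	import (
		"context"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// updatePV wraps the post-v1.18 call shape: every mutating verb now
	// takes a context.Context first and a typed options struct last.
	func updatePV(ctx context.Context, client kubernetes.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
		// Pre-v1.18 equivalent: client.CoreV1().PersistentVolumes().Update(pv)
		return client.CoreV1().PersistentVolumes().Update(ctx, pv, metav1.UpdateOptions{})
	}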
vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder_fake.go (generated, vendored): 18 changes
@@ -20,12 +20,11 @@ import "k8s.io/api/core/v1"
 
 // FakeVolumeBinderConfig holds configurations for fake volume binder.
 type FakeVolumeBinderConfig struct {
-	AllBound             bool
-	FindUnboundSatsified bool
-	FindBoundSatsified   bool
-	FindErr              error
-	AssumeErr            error
-	BindErr              error
+	AllBound    bool
+	FindReasons ConflictReasons
+	FindErr     error
+	AssumeErr   error
+	BindErr     error
 }
 
 // NewFakeVolumeBinder sets up all the caches needed for the scheduler to make
@@ -44,8 +43,8 @@ type FakeVolumeBinder struct {
 }
 
 // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes.
-func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolumesSatisfied, boundVolumesSatsified bool, err error) {
-	return b.config.FindUnboundSatsified, b.config.FindBoundSatsified, b.config.FindErr
+func (b *FakeVolumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons ConflictReasons, err error) {
+	return b.config.FindReasons, b.config.FindErr
 }
 
 // AssumePodVolumes implements SchedulerVolumeBinder.AssumePodVolumes.
@@ -64,3 +63,6 @@ func (b *FakeVolumeBinder) BindPodVolumes(assumedPod *v1.Pod) error {
 func (b *FakeVolumeBinder) GetBindingsCache() PodBindingCache {
 	return nil
 }
+
+// DeletePodBindings implements SchedulerVolumeBinder.DeletePodBindings.
+func (b *FakeVolumeBinder) DeletePodBindings(pod *v1.Pod) {}
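With the fake binder updated, tests inject the exact reasons they want FindPodVolumes to report instead of flipping two booleans. A sketch of plausible test wiring; it assumes NewFakeVolumeBinder accepts a *FakeVolumeBinderConfig, as the surrounding file suggests, and the helper name newConflictedBinder is hypothetical:

	package example

	import (
		"k8s.io/kubernetes/pkg/controller/volume/scheduling"
	)

	// newConflictedBinder returns a fake binder whose FindPodVolumes reports
	// a node-affinity conflict, replacing the old FindBoundSatsified=false
	// style of test configuration.
	func newConflictedBinder() scheduling.SchedulerVolumeBinder {
		return scheduling.NewFakeVolumeBinder(&scheduling.FakeVolumeBinderConfig{
			FindReasons: scheduling.ConflictReasons{scheduling.ErrReasonNodeConflict},
		})
	}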