Migrate from snapClient.VolumesnapshotV1alpha1Client to snapClient.SnapshotV1alpha1Client and also update kube dependency

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
Humble Chirammal authored 2019-06-24 14:38:09 +05:30; committed by mergify[bot]
parent 3bc6771df8, commit 22ff5c0911
1031 changed files with 34242 additions and 177906 deletions
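For context on the title: the external-snapshotter clientset renamed its typed client group accessor, so each call site changes one method name. A minimal sketch of the migration at a call site (the import path, namespace, and snapshot name here are illustrative assumptions, not taken from this diff):

	import (
		snapclient "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	func getSnapshot(cs snapclient.Interface, namespace, name string) error {
		// Before this commit the typed client group was reached via
		// cs.VolumesnapshotV1alpha1(); after it, via cs.SnapshotV1alpha1().
		// The VolumeSnapshots API underneath is unchanged.
		snap, err := cs.SnapshotV1alpha1().VolumeSnapshots(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		_ = snap // use the snapshot object as needed
		return nil
	}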


@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"
@@ -29,16 +30,20 @@ import (
 // CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
 type CSIMaxVolumeLimitChecker struct {
-	pvInfo  PersistentVolumeInfo
-	pvcInfo PersistentVolumeClaimInfo
+	pvInfo               PersistentVolumeInfo
+	pvcInfo              PersistentVolumeClaimInfo
+	scInfo               StorageClassInfo
+	randomVolumeIDPrefix string
 }
 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-	pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) FitPredicate {
+	pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, scInfo StorageClassInfo) FitPredicate {
 	c := &CSIMaxVolumeLimitChecker{
-		pvInfo:  pvInfo,
-		pvcInfo: pvcInfo,
+		pvInfo:               pvInfo,
+		pvcInfo:              pvcInfo,
+		scInfo:               scInfo,
+		randomVolumeIDPrefix: rand.String(32),
 	}
 	return c.attachableLimitPredicate
 }
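Because the constructor now takes a StorageClassInfo, every caller has to thread one through. A hedged sketch of the updated wiring, assuming the scheduler factory's PluginFactoryArgs fields of this era (PVInfo, PVCInfo, StorageClassInfo are assumptions, not shown in this diff):

	// Illustrative registration only; the args field names are assumptions.
	predicate := predicates.NewCSIMaxVolumeLimitPredicate(
		args.PVInfo, args.PVCInfo, args.StorageClassInfo)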
@@ -129,28 +134,70 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
 			continue
 		}
-		pvName := pvc.Spec.VolumeName
-		// TODO - the actual handling of unbound PVCs will be fixed by late binding design.
-		if pvName == "" {
-			klog.V(4).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
+		driverName, volumeHandle := c.getCSIDriver(pvc)
+		// if we can't find driver name or volume handle - we don't count this volume.
+		if driverName == "" || volumeHandle == "" {
 			continue
 		}
-		pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
-		if err != nil {
-			klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
-			continue
-		}
-		csiSource := pv.Spec.PersistentVolumeSource.CSI
-		if csiSource == nil {
-			klog.V(4).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
-			continue
-		}
-		driverName := csiSource.Driver
 		volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
-		result[csiSource.VolumeHandle] = volumeLimitKey
+		result[volumeHandle] = volumeLimitKey
 	}
 	return nil
 }
+
+func (c *CSIMaxVolumeLimitChecker) getCSIDriver(pvc *v1.PersistentVolumeClaim) (string, string) {
+	pvName := pvc.Spec.VolumeName
+	namespace := pvc.Namespace
+	pvcName := pvc.Name
+
+	placeHolderCSIDriver := ""
+	placeHolderHandle := ""
+	if pvName == "" {
+		klog.V(5).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
+		return c.getDriverNameFromSC(pvc)
+	}
+	pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
+	if err != nil {
+		klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
+		// If we can't fetch PV associated with PVC, may be it got deleted
+		// or PVC was prebound to a PVC that hasn't been created yet.
+		// fallback to using StorageClass for volume counting
+		return c.getDriverNameFromSC(pvc)
+	}
+
+	csiSource := pv.Spec.PersistentVolumeSource.CSI
+	if csiSource == nil {
+		klog.V(5).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
+		return placeHolderCSIDriver, placeHolderHandle
+	}
+	return csiSource.Driver, csiSource.VolumeHandle
+}
+
+func (c *CSIMaxVolumeLimitChecker) getDriverNameFromSC(pvc *v1.PersistentVolumeClaim) (string, string) {
+	namespace := pvc.Namespace
+	pvcName := pvc.Name
+	scName := pvc.Spec.StorageClassName
+
+	placeHolderCSIDriver := ""
+	placeHolderHandle := ""
+	if scName == nil {
+		// if StorageClass is not set or found, then PVC must be using immediate binding mode
+		// and hence it must be bound before scheduling. So it is safe to not count it.
+		klog.V(5).Infof("pvc %s/%s has no storageClass", namespace, pvcName)
+		return placeHolderCSIDriver, placeHolderHandle
+	}
+
+	storageClass, err := c.scInfo.GetStorageClassInfo(*scName)
+	if err != nil {
+		klog.V(5).Infof("no storage %s found for pvc %s/%s", *scName, namespace, pvcName)
+		return placeHolderCSIDriver, placeHolderHandle
+	}
+
+	// We use random prefix to avoid conflict with volume-ids. If PVC is bound in the middle
+	// predicate and there is another pod(on same node) that uses same volume then we will overcount
+	// the volume and consider both volumes as different.
+	volumeHandle := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
+	return storageClass.Provisioner, volumeHandle
+}
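The net effect of the fallback above: an unbound PVC is now counted against the attach limit of its StorageClass provisioner, under a synthetic volume handle that cannot collide with a real volume ID or with another unbound claim. A self-contained sketch of that keying scheme (the namespace and claim name are made up for illustration):

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/util/rand"
	)

	func main() {
		// One random prefix per checker instance, mirroring randomVolumeIDPrefix.
		prefix := rand.String(32)

		// Bound PVCs are keyed by their real CSI volume handle; an unbound PVC
		// gets a handle derived from namespace/name, so two different unbound
		// claims are never collapsed into one count, and the random prefix keeps
		// these synthetic handles out of the real volume-ID space.
		unboundHandle := fmt.Sprintf("%s-%s/%s", prefix, "default", "my-claim")
		fmt.Println(unboundHandle)
	}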


@@ -55,13 +55,6 @@ type topologyPair struct {
 	value string
 }
 
-// Note that predicateMetadata and matchingPodAntiAffinityTerm need to be declared in the same file
-// due to the way declarations are processed in predicate declaration unit tests.
-type matchingPodAntiAffinityTerm struct {
-	term *v1.PodAffinityTerm
-	node *v1.Node
-}
-
 type podSet map[*v1.Pod]struct{}
 
 type topologyPairSet map[topologyPair]struct{}
@@ -107,13 +100,10 @@ var _ PredicateMetadata = &predicateMetadata{}
 // and used to modify the return values of PredicateMetadataProducer
 type predicateMetadataProducer func(pm *predicateMetadata)
 
-var predicateMetaProducerRegisterLock sync.Mutex
 var predicateMetadataProducers = make(map[string]predicateMetadataProducer)
 
 // RegisterPredicateMetadataProducer registers a PredicateMetadataProducer.
 func RegisterPredicateMetadataProducer(predicateName string, precomp predicateMetadataProducer) {
-	predicateMetaProducerRegisterLock.Lock()
-	defer predicateMetaProducerRegisterLock.Unlock()
 	predicateMetadataProducers[predicateName] = precomp
 }
@@ -399,6 +389,8 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s
 		}
 	}
 
+	ctx, cancel := context.WithCancel(context.Background())
+
 	processNode := func(i int) {
 		nodeInfo := nodeInfoMap[allNodeNames[i]]
 		node := nodeInfo.Node()
@@ -410,12 +402,13 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s
 			existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node)
 			if err != nil {
 				catchError(err)
+				cancel()
 				return
 			}
 			appendTopologyPairsMaps(existingPodTopologyMaps)
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processNode)
 	return topologyMaps, firstError
 }
@@ -464,6 +457,8 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
 	}
 	antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity)
 
+	ctx, cancel := context.WithCancel(context.Background())
+
 	processNode := func(i int) {
 		nodeInfo := nodeInfoMap[allNodeNames[i]]
 		node := nodeInfo.Node()
@@ -489,6 +484,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
 				selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 				if err != nil {
 					catchError(err)
+					cancel()
 					return
 				}
 				if priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) {
@@ -503,7 +499,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
 			appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
 		}
 	}
-	workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
+	workqueue.ParallelizeUntil(ctx, 16, len(allNodeNames), processNode)
 	return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
 }
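The change running through the hunks above is plain context plumbing: each call builds its own cancelable context, the first worker that hits an error calls cancel(), and ParallelizeUntil stops handing out further nodes instead of grinding through all of them. A standalone sketch of the same pattern, with a toy work function standing in for processNode:

	package main

	import (
		"context"
		"fmt"
		"sync"

		"k8s.io/client-go/util/workqueue"
	)

	func main() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		var mu sync.Mutex
		var firstErr error
		catchError := func(err error) {
			mu.Lock()
			defer mu.Unlock()
			if firstErr == nil {
				firstErr = err
			}
		}

		process := func(i int) {
			if i == 7 { // simulate one failing piece of work
				catchError(fmt.Errorf("piece %d failed", i))
				cancel() // stop scheduling the remaining pieces
				return
			}
		}

		// Passing ctx instead of context.TODO() is what makes the cancel effective.
		workqueue.ParallelizeUntil(ctx, 16, 100, process)
		fmt.Println("first error:", firstErr)
	}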


@@ -173,11 +173,6 @@ func Ordering() []string {
 	return predicatesOrdering
 }
 
-// SetPredicatesOrdering sets the ordering of predicates.
-func SetPredicatesOrdering(names []string) {
-	predicatesOrdering = names
-}
-
 // GetPersistentVolumeInfo returns a persistent volume object by PV ID.
 func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
 	return c.Get(pvID)
@@ -687,7 +682,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI
 					}
 				}
 			}
-			return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName)
+			return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName)
 		}
 
 		pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
@@ -696,7 +691,7 @@ func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta PredicateMetadata, nodeI
 		}
 
 		if pv == nil {
-			return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName)
+			return false, nil, fmt.Errorf("PersistentVolume was not found: %q", pvName)
 		}
 
 		for k, v := range pv.ObjectMeta.Labels {
@@ -978,16 +973,18 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 		return
 	}
 	pm.serviceAffinityInUse = true
-	var errSvc, errList error
+	var err error
 	// Store services which match the pod.
-	pm.serviceAffinityMatchingPodServices, errSvc = s.serviceLister.GetPodServices(pm.pod)
-	selector := CreateSelectorFromLabels(pm.pod.Labels)
-	allMatches, errList := s.podLister.List(selector)
-	// In the future maybe we will return them as part of the function.
-	if errSvc != nil || errList != nil {
-		klog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList)
+	pm.serviceAffinityMatchingPodServices, err = s.serviceLister.GetPodServices(pm.pod)
+	if err != nil {
+		klog.Errorf("Error precomputing service affinity: could not list services: %v", err)
 	}
+	selector := CreateSelectorFromLabels(pm.pod.Labels)
+	allMatches, err := s.podLister.List(selector)
+	if err != nil {
+		klog.Errorf("Error precomputing service affinity: could not list pods: %v", err)
+	}
 	// consider only the pods that belong to the same namespace
 	pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace)
 }
@@ -1093,6 +1090,9 @@ func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulerno
 
 // search two arrays and return true if they have at least one common element; return false otherwise
 func haveOverlap(a1, a2 []string) bool {
+	if len(a1) > len(a2) {
+		a1, a2 = a2, a1
+	}
 	m := map[string]bool{}
 	for _, val := range a1 {
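The three added lines are a small optimization with no behavior change: swapping so that a1 is always the shorter slice means the lookup map is built from the smaller input. The hunk is truncated here; for reference, the whole function plausibly reads as below, with the loop bodies inferred rather than shown in this diff:

	// search two arrays and return true if they have at least one common element; return false otherwise
	func haveOverlap(a1, a2 []string) bool {
		if len(a1) > len(a2) {
			a1, a2 = a2, a1
		}
		m := map[string]bool{}
		for _, val := range a1 {
			m[val] = true
		}
		for _, val := range a2 {
			if _, ok := m[val]; ok {
				return true
			}
		}
		return false
	}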
@@ -1293,11 +1293,11 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P
 	topologyMaps := newTopologyPairsMaps()
 	for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) {
-		namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
 		selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 		if err != nil {
 			return nil, err
 		}
+		namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
 		if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
 			if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
 				pair := topologyPair{key: term.TopologyKey, value: topologyValue}
@@ -1315,7 +1315,8 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
 		existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
 		if err != nil {
 			if apierrors.IsNotFound(err) {
-				klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
+				klog.Errorf("Pod %s has NodeName %q but node is not found",
+					podName(existingPod), existingPod.Spec.NodeName)
 				continue
 			}
 			return nil, err
@@ -1344,12 +1345,12 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta
 	// present in nodeInfo. Pods on other nodes pass the filter.
 	filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything())
 	if err != nil {
-		errMessage := fmt.Sprintf("Failed to get all pods, %+v", err)
+		errMessage := fmt.Sprintf("Failed to get all pods: %v", err)
 		klog.Error(errMessage)
 		return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
 	}
 	if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil {
-		errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err)
+		errMessage := fmt.Sprintf("Failed to get all terms that match pod %s: %v", podName(pod), err)
 		klog.Error(errMessage)
 		return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage)
 	}
@@ -1454,7 +1455,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 			if !matchFound && len(affinityTerms) > 0 {
 				affTermsMatch, termsSelectorMatch, err := c.podMatchesPodAffinityTerms(pod, targetPod, nodeInfo, affinityTerms)
 				if err != nil {
-					errMessage := fmt.Sprintf("Cannot schedule pod %+v onto node %v, because of PodAffinity, err: %v", podName(pod), node.Name, err)
+					errMessage := fmt.Sprintf("Cannot schedule pod %s onto node %s, because of PodAffinity: %v", podName(pod), node.Name, err)
 					klog.Error(errMessage)
 					return ErrPodAffinityRulesNotMatch, errors.New(errMessage)
 				}


@@ -77,3 +77,13 @@ func portsConflict(existingPorts schedulernodeinfo.HostPortInfo, wantPorts []*v1
 	return false
 }
+
+// SetPredicatesOrderingDuringTest sets the predicatesOrdering to the specified
+// value, and returns a function that restores the original value.
+func SetPredicatesOrderingDuringTest(value []string) func() {
+	origVal := predicatesOrdering
+	predicatesOrdering = value
+	return func() {
+		predicatesOrdering = origVal
+	}
+}
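A typical use of the new helper pairs it with defer, so the package-level ordering is restored on every exit path, including test failures. A minimal sketch (the test name and ordering values are illustrative):

	package predicates

	import "testing"

	func TestWithCustomOrdering(t *testing.T) {
		// Override the package-level predicatesOrdering for this test only;
		// the returned closure restores the original value when the test ends.
		defer SetPredicatesOrderingDuringTest([]string{"PodFitsHostPorts", "PodFitsResources"})()

		// ... assertions that depend on the overridden ordering ...
	}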