Fresh dep ensure
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD (generated, vendored; 74 lines changed)
@@ -15,31 +15,29 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/controller/resourcequota",
     deps = [
-        "//pkg/apis/core:go_default_library",
-        "//pkg/apis/core/v1:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/quota:go_default_library",
-        "//pkg/quota/evaluator/core:go_default_library",
-        "//pkg/quota/generic:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/discovery:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
-        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
+        "//pkg/quota/v1:go_default_library",
+        "//pkg/quota/v1/evaluator/core:go_default_library",
+        "//pkg/quota/v1/generic:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/discovery:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -49,20 +47,20 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/controller:go_default_library",
-        "//pkg/quota:go_default_library",
-        "//pkg/quota/generic:go_default_library",
-        "//pkg/quota/install:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/client-go/informers:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
-        "//vendor/k8s.io/client-go/testing:go_default_library",
-        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
+        "//pkg/quota/v1:go_default_library",
+        "//pkg/quota/v1/generic:go_default_library",
+        "//pkg/quota/v1/install:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//staging/src/k8s.io/client-go/testing:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
     ],
 )
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go (generated, vendored; 104 lines changed)
@@ -22,7 +22,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -39,10 +39,8 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/quota"
+	quota "k8s.io/kubernetes/pkg/quota/v1"
 )
 
 // NamespacedResourcesFunc knows how to discover namespaced resources.
@@ -161,11 +159,14 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou
 
 	rq.quotaMonitor = qm
 
-	// do initial quota monitor setup
+	// do initial quota monitor setup. If we have a discovery failure here, it's ok. We'll discover more resources when a later sync happens.
 	resources, err := GetQuotableResources(options.DiscoveryFunc)
-	if err != nil {
+	if discovery.IsGroupDiscoveryFailedError(err) {
+		utilruntime.HandleError(fmt.Errorf("initial discovery check failure, continuing and counting on future sync update: %v", err))
+	} else if err != nil {
 		return nil, err
 	}
+
 	if err = qm.SyncMonitors(resources); err != nil {
 		utilruntime.HandleError(fmt.Errorf("initial monitor sync has error: %v", err))
 	}
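Note on the check above: discovery.IsGroupDiscoveryFailedError distinguishes a partial discovery failure (some API groups unreachable) from a hard failure. A minimal standalone sketch of the error type it keys on, using a made-up failing group purely for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
)

func main() {
	// ErrGroupDiscoveryFailed carries per-group failures while the rest of
	// discovery succeeded; that is why the controller can log and continue.
	err := &discovery.ErrGroupDiscoveryFailed{
		Groups: map[schema.GroupVersion]error{
			{Group: "metrics.k8s.io", Version: "v1beta1"}: fmt.Errorf("the server is currently unable to handle the request"),
		},
	}
	fmt.Println(discovery.IsGroupDiscoveryFailedError(err)) // true
}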
@@ -179,7 +180,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou
 
 // enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
 func (rq *ResourceQuotaController) enqueueAll() {
-	defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
+	defer klog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
 	rqs, err := rq.rqLister.List(labels.Everything())
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("unable to enqueue all - error listing resource quotas: %v", err))
@@ -199,7 +200,7 @@ func (rq *ResourceQuotaController) enqueueAll() {
 func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
-		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+		klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
 		return
 	}
 	rq.queue.Add(key)
@@ -208,7 +209,7 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
 func (rq *ResourceQuotaController) addQuota(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
-		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+		klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
 		return
 	}
 
@@ -223,7 +224,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
 	// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
 	for constraint := range resourceQuota.Status.Hard {
 		if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
-			matchedResources := []api.ResourceName{api.ResourceName(constraint)}
+			matchedResources := []v1.ResourceName{v1.ResourceName(constraint)}
 			for _, evaluator := range rq.registry.List() {
 				if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
 					rq.missingUsageQueue.Add(key)
@@ -260,7 +261,7 @@ func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface)
 	return func() {
 		for {
 			if quit := workFunc(); quit {
-				glog.Infof("resource quota controller worker shutting down")
+				klog.Infof("resource quota controller worker shutting down")
 				return
 			}
 		}
@@ -272,8 +273,8 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer rq.queue.ShutDown()
 
-	glog.Infof("Starting resource quota controller")
-	defer glog.Infof("Shutting down resource quota controller")
+	klog.Infof("Starting resource quota controller")
+	defer klog.Infof("Shutting down resource quota controller")
 
 	if rq.quotaMonitor != nil {
 		go rq.quotaMonitor.Run(stopCh)
@@ -297,7 +298,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
 func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
 	startTime := time.Now()
 	defer func() {
-		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
+		klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -306,36 +307,31 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
 	}
 	quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
 	if errors.IsNotFound(err) {
-		glog.Infof("Resource quota has been deleted %v", key)
+		klog.Infof("Resource quota has been deleted %v", key)
 		return nil
 	}
 	if err != nil {
-		glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
+		klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
 		return err
 	}
 	return rq.syncResourceQuota(quota)
 }
 
 // syncResourceQuota runs a complete sync of resource quota status across all known kinds
-func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.ResourceQuota) (err error) {
+func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) {
 	// quota is dirty if any part of spec hard limits differs from the status hard limits
-	dirty := !apiequality.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)
-
-	resourceQuota := api.ResourceQuota{}
-	if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil {
-		return err
-	}
+	dirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
 
 	// dirty tracks if the usage status differs from the previous sync,
 	// if so, we send a new usage with latest status
 	// if this is our first sync, it will be dirty by default, since we need track usage
-	dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)
+	dirty = dirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil
 
-	used := api.ResourceList{}
+	used := v1.ResourceList{}
 	if resourceQuota.Status.Used != nil {
-		used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
+		used = quota.Add(v1.ResourceList{}, resourceQuota.Status.Used)
 	}
-	hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)
+	hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard)
 
 	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector)
 	if err != nil {
@@ -351,28 +347,17 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc
 
 	// Create a usage object that is based on the quota resource version that will handle updates
 	// by default, we preserve the past usage observation, and set hard to the current spec
-	usage := api.ResourceQuota{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:            resourceQuota.Name,
-			Namespace:       resourceQuota.Namespace,
-			ResourceVersion: resourceQuota.ResourceVersion,
-			Labels:          resourceQuota.Labels,
-			Annotations:     resourceQuota.Annotations},
-		Status: api.ResourceQuotaStatus{
-			Hard: hardLimits,
-			Used: used,
-		},
+	usage := resourceQuota.DeepCopy()
+	usage.Status = v1.ResourceQuotaStatus{
+		Hard: hardLimits,
+		Used: used,
 	}
 
 	dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)
 
 	// there was a change observed by this controller that requires we update quota
 	if dirty {
-		v1Usage := &v1.ResourceQuota{}
-		if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
-			return err
-		}
-		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
+		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(usage)
 		return err
 	}
 	return nil
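The rewrite above replaces a hand-built api.ResourceQuota, plus two conversion round-trips, with a DeepCopy of the v1 object. A minimal sketch with made-up values of why DeepCopy suffices: it carries ObjectMeta (name, namespace, resourceVersion, labels, annotations) along implicitly, which the removed code rebuilt field by field, and it leaves the source object untouched:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	quota := &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "quota",
			Namespace:       "testing",
			ResourceVersion: "42",
			Labels:          map[string]string{"team": "storage"},
		},
	}
	// DeepCopy duplicates the whole object; only the copy's status is then
	// overwritten, so the original quota's status stays nil.
	usage := quota.DeepCopy()
	usage.Status = v1.ResourceQuotaStatus{
		Hard: v1.ResourceList{v1.ResourcePods: resource.MustParse("5")},
	}
	fmt.Println(usage.Name, usage.ResourceVersion, quota.Status.Hard == nil) // quota 42 true
}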
@@ -403,12 +388,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupReso
 	// only queue those quotas that are tracking a resource associated with this kind.
 	for i := range resourceQuotas {
 		resourceQuota := resourceQuotas[i]
-		internalResourceQuota := &api.ResourceQuota{}
-		if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil {
-			glog.Error(err)
-			continue
-		}
-		resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
+		resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard)
 		if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 {
 			// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
 			rq.enqueueResourceQuota(resourceQuota)
@@ -425,17 +405,26 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p
 	newResources, err := GetQuotableResources(discoveryFunc)
 	if err != nil {
 		utilruntime.HandleError(err)
-		return
+
+		if discovery.IsGroupDiscoveryFailedError(err) && len(newResources) > 0 {
+			// In partial discovery cases, don't remove any existing informers, just add new ones
+			for k, v := range oldResources {
+				newResources[k] = v
+			}
+		} else {
+			// short circuit in non-discovery error cases or if discovery returned zero resources
+			return
+		}
 	}
 
 	// Decide whether discovery has reported a change.
 	if reflect.DeepEqual(oldResources, newResources) {
-		glog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
+		klog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
 		return
 	}
 
 	// Something has changed, so track the new state and perform a sync.
-	glog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
+	klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
 	oldResources = newResources
 
 	// Ensure workers are paused to avoid processing events before informers
@@ -470,15 +459,18 @@ func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVers
 
 // GetQuotableResources returns all resources that the quota system should recognize.
 // It requires a resource supports the following verbs: 'create','list','delete'
+// This function may return both results and an error. If that happens, it means that the discovery calls were only
+// partially successful. A decision about whether to proceed or not is left to the caller.
 func GetQuotableResources(discoveryFunc NamespacedResourcesFunc) (map[schema.GroupVersionResource]struct{}, error) {
-	possibleResources, err := discoveryFunc()
-	if err != nil {
-		return nil, fmt.Errorf("failed to discover resources: %v", err)
+	possibleResources, discoveryErr := discoveryFunc()
+	if discoveryErr != nil && len(possibleResources) == 0 {
+		return nil, fmt.Errorf("failed to discover resources: %v", discoveryErr)
 	}
 	quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "watch", "delete"}}, possibleResources)
 	quotableGroupVersionResources, err := discovery.GroupVersionResources(quotableResources)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to parse resources: %v", err)
 	}
-	return quotableGroupVersionResources, nil
+	// return the original discovery error (if any) in addition to the list
+	return quotableGroupVersionResources, discoveryErr
 }
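GetQuotableResources can now hand back a partial resource map together with the discovery error, and the Sync loop above decides whether that is good enough. A minimal sketch of that caller-side pattern; mergeQuotable and getQuotable are hypothetical stand-ins, not names from this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
)

// mergeQuotable tolerates a partial discovery failure as long as something
// was discovered; otherwise it keeps the previous set unchanged.
func mergeQuotable(
	getQuotable func() (map[schema.GroupVersionResource]struct{}, error),
	old map[schema.GroupVersionResource]struct{},
) map[schema.GroupVersionResource]struct{} {
	newResources, err := getQuotable()
	if err != nil {
		if discovery.IsGroupDiscoveryFailedError(err) && len(newResources) > 0 {
			// Partial discovery: keep every resource already monitored and
			// fold in whatever was newly discovered.
			for gvr := range old {
				newResources[gvr] = struct{}{}
			}
			return newResources
		}
		// Hard failure, or nothing discovered at all: change nothing.
		return old
	}
	return newResources
}

func main() {
	pods := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	old := map[schema.GroupVersionResource]struct{}{pods: {}}
	failing := func() (map[schema.GroupVersionResource]struct{}, error) {
		return nil, fmt.Errorf("discovery is down")
	}
	fmt.Println(len(mergeQuotable(failing, old))) // 1: the old set is kept
}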
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller_test.go (generated, vendored; 463 lines changed)
@@ -33,9 +33,9 @@ import (
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/quota"
-	"k8s.io/kubernetes/pkg/quota/generic"
-	"k8s.io/kubernetes/pkg/quota/install"
+	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1/generic"
+	"k8s.io/kubernetes/pkg/quota/v1/install"
 )
 
 func getResourceList(cpu, memory string) v1.ResourceList {
@@ -136,6 +136,66 @@ func newTestPods() []runtime.Object {
 	}
 }
 
+func newBestEffortTestPods() []runtime.Object {
+	return []runtime.Object{
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
+			Status:     v1.PodStatus{Phase: v1.PodRunning},
+			Spec: v1.PodSpec{
+				Volumes:    []v1.Volume{{Name: "vol"}},
+				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", ""))}},
+			},
+		},
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
+			Status:     v1.PodStatus{Phase: v1.PodRunning},
+			Spec: v1.PodSpec{
+				Volumes:    []v1.Volume{{Name: "vol"}},
+				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", ""))}},
+			},
+		},
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
+			Status:     v1.PodStatus{Phase: v1.PodFailed},
+			Spec: v1.PodSpec{
+				Volumes:    []v1.Volume{{Name: "vol"}},
+				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
+			},
+		},
+	}
+}
+
+func newTestPodsWithPriorityClasses() []runtime.Object {
+	return []runtime.Object{
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
+			Status:     v1.PodStatus{Phase: v1.PodRunning},
+			Spec: v1.PodSpec{
+				Volumes:           []v1.Volume{{Name: "vol"}},
+				Containers:        []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("500m", "50Gi"), getResourceList("", ""))}},
+				PriorityClassName: "high",
+			},
+		},
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
+			Status:     v1.PodStatus{Phase: v1.PodRunning},
+			Spec: v1.PodSpec{
+				Volumes:           []v1.Volume{{Name: "vol"}},
+				Containers:        []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
+				PriorityClassName: "low",
+			},
+		},
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
+			Status:     v1.PodStatus{Phase: v1.PodFailed},
+			Spec: v1.PodSpec{
+				Volumes:    []v1.Volume{{Name: "vol"}},
+				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
+			},
+		},
+	}
+}
+
 func TestSyncResourceQuota(t *testing.T) {
 	testCases := map[string]struct {
 		gvr schema.GroupVersionResource
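As context for the new helper: a pod counts as BestEffort only when no container sets any request or limit, which is exactly what the empty getResourceList("", "") values arrange. A minimal sketch using the QoS helper from the same kubernetes tree; the import path is assumed from this vendored snapshot:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
)

func main() {
	// No requests or limits anywhere in the pod spec, so the pod is
	// classified BestEffort and matches the BestEffort quota scope.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "ctr", Image: "image"}},
		},
	}
	fmt.Println(qos.GetPodQOS(pod)) // BestEffort
}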
@@ -144,6 +204,403 @@ func TestSyncResourceQuota(t *testing.T) {
 		status v1.ResourceQuotaStatus
 		expectedActionSet sets.String
 	}{
+		"non-matching-best-effort-scoped-quota": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					Scopes: []v1.ResourceQuotaScope{v1.ResourceQuotaScopeBestEffort},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("0"),
+					v1.ResourceMemory: resource.MustParse("0"),
+					v1.ResourcePods:   resource.MustParse("0"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPods(),
+		},
+		"matching-best-effort-scoped-quota": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					Scopes: []v1.ResourceQuotaScope{v1.ResourceQuotaScopeBestEffort},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("0"),
+					v1.ResourceMemory: resource.MustParse("0"),
+					v1.ResourcePods:   resource.MustParse("2"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newBestEffortTestPods(),
+		},
+		"non-matching-priorityclass-scoped-quota-OpExists": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpExists},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("0"),
+					v1.ResourceMemory: resource.MustParse("0"),
+					v1.ResourcePods:   resource.MustParse("0"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPods(),
+		},
+		"matching-priorityclass-scoped-quota-OpExists": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpExists},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("600m"),
+					v1.ResourceMemory: resource.MustParse("51Gi"),
+					v1.ResourcePods:   resource.MustParse("2"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPodsWithPriorityClasses(),
+		},
+		"matching-priorityclass-scoped-quota-OpIn": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpIn,
+								Values:    []string{"high", "low"},
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("600m"),
+					v1.ResourceMemory: resource.MustParse("51Gi"),
+					v1.ResourcePods:   resource.MustParse("2"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPodsWithPriorityClasses(),
+		},
+		"matching-priorityclass-scoped-quota-OpIn-high": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpIn,
+								Values:    []string{"high"},
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("500m"),
+					v1.ResourceMemory: resource.MustParse("50Gi"),
+					v1.ResourcePods:   resource.MustParse("1"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPodsWithPriorityClasses(),
+		},
+		"matching-priorityclass-scoped-quota-OpIn-low": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpIn,
+								Values:    []string{"low"},
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("1Gi"),
+					v1.ResourcePods:   resource.MustParse("1"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPodsWithPriorityClasses(),
+		},
+		"matching-priorityclass-scoped-quota-OpNotIn-low": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpNotIn,
+								Values:    []string{"high"},
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("100m"),
+					v1.ResourceMemory: resource.MustParse("1Gi"),
+					v1.ResourcePods:   resource.MustParse("1"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPodsWithPriorityClasses(),
+		},
+		"non-matching-priorityclass-scoped-quota-OpIn": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpIn,
+								Values:    []string{"random"},
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("0"),
+					v1.ResourceMemory: resource.MustParse("0"),
+					v1.ResourcePods:   resource.MustParse("0"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPodsWithPriorityClasses(),
+		},
+		"non-matching-priorityclass-scoped-quota-OpNotIn": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpNotIn,
+								Values:    []string{"random"},
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("200m"),
+					v1.ResourceMemory: resource.MustParse("2Gi"),
+					v1.ResourcePods:   resource.MustParse("2"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPods(),
+		},
+		"matching-priorityclass-scoped-quota-OpDoesNotExist": {
+			gvr: v1.SchemeGroupVersion.WithResource("pods"),
+			quota: v1.ResourceQuota{
+				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
+				Spec: v1.ResourceQuotaSpec{
+					Hard: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("3"),
+						v1.ResourceMemory: resource.MustParse("100Gi"),
+						v1.ResourcePods:   resource.MustParse("5"),
+					},
+					ScopeSelector: &v1.ScopeSelector{
+						MatchExpressions: []v1.ScopedResourceSelectorRequirement{
+							{
+								ScopeName: v1.ResourceQuotaScopePriorityClass,
+								Operator:  v1.ScopeSelectorOpDoesNotExist,
+							},
+						},
+					},
+				},
+			},
+			status: v1.ResourceQuotaStatus{
+				Hard: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("3"),
+					v1.ResourceMemory: resource.MustParse("100Gi"),
+					v1.ResourcePods:   resource.MustParse("5"),
+				},
+				Used: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("200m"),
+					v1.ResourceMemory: resource.MustParse("2Gi"),
+					v1.ResourcePods:   resource.MustParse("2"),
+				},
+			},
+			expectedActionSet: sets.NewString(
+				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
+			),
+			items: newTestPods(),
+		},
 		"pods": {
 			gvr: v1.SchemeGroupVersion.WithResource("pods"),
 			quota: v1.ResourceQuota{
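The matching/non-matching pairs above exercise the four scope-selector operators against a pod's priorityClassName. A hand-rolled sketch of the operator semantics the cases assume; this is an illustration only, not the evaluator's actual code:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// matchesPriorityScope evaluates one priority-class scope requirement
// against a pod's priorityClassName, mirroring the expectations in the
// test table above.
func matchesPriorityScope(req v1.ScopedResourceSelectorRequirement, priorityClassName string) bool {
	switch req.Operator {
	case v1.ScopeSelectorOpExists:
		return priorityClassName != ""
	case v1.ScopeSelectorOpDoesNotExist:
		return priorityClassName == ""
	case v1.ScopeSelectorOpIn:
		for _, v := range req.Values {
			if v == priorityClassName {
				return true
			}
		}
		return false
	case v1.ScopeSelectorOpNotIn:
		for _, v := range req.Values {
			if v == priorityClassName {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	req := v1.ScopedResourceSelectorRequirement{
		ScopeName: v1.ResourceQuotaScopePriorityClass,
		Operator:  v1.ScopeSelectorOpIn,
		Values:    []string{"high"},
	}
	fmt.Println(matchesPriorityScope(req, "high")) // true
	fmt.Println(matchesPriorityScope(req, "low"))  // false
}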
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_monitor.go (generated, vendored; 28 lines changed)
@@ -21,7 +21,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -33,9 +33,9 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubernetes/pkg/controller"
-	"k8s.io/kubernetes/pkg/quota"
-	"k8s.io/kubernetes/pkg/quota/evaluator/core"
-	"k8s.io/kubernetes/pkg/quota/generic"
+	quota "k8s.io/kubernetes/pkg/quota/v1"
+	"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
+	"k8s.io/kubernetes/pkg/quota/v1/generic"
 )
 
 type eventType int
@@ -173,11 +173,11 @@ func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cac
 	}
 	shared, err := qm.informerFactory.ForResource(resource)
 	if err == nil {
-		glog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
+		klog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
 		shared.Informer().AddEventHandlerWithResyncPeriod(handlers, qm.resyncPeriod())
 		return shared.Informer().GetController(), nil
 	}
-	glog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)
+	klog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)
 
 	// TODO: if we can share storage with garbage collector, it may make sense to support other resources
 	// until that time, aggregated api servers will have to run their own controller to reconcile their own quota.
@@ -223,9 +223,9 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
 		if evaluator == nil {
 			listerFunc := generic.ListerFuncForResourceFunc(qm.informerFactory.ForResource)
 			listResourceFunc := generic.ListResourceUsingListerFunc(listerFunc, resource)
-			evaluator = generic.NewObjectCountEvaluator(false, resource.GroupResource(), listResourceFunc, "")
+			evaluator = generic.NewObjectCountEvaluator(resource.GroupResource(), listResourceFunc, "")
 			qm.registry.Add(evaluator)
-			glog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
+			klog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
 		}
 
 		// track the monitor
@@ -240,7 +240,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
 		}
 	}
 
-	glog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
+	klog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
 	// NewAggregate returns nil if errs is 0-length
 	return utilerrors.NewAggregate(errs)
 }
@@ -272,7 +272,7 @@ func (qm *QuotaMonitor) StartMonitors() {
 			started++
 		}
 	}
-	glog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
+	klog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
 }
 
 // IsSynced returns true if any monitors exist AND all those monitors'
@@ -298,8 +298,8 @@ func (qm *QuotaMonitor) IsSynced() bool {
 // Run sets the stop channel and starts monitor execution until stopCh is
 // closed. Any running monitors will be stopped before Run returns.
 func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
-	glog.Infof("QuotaMonitor running")
-	defer glog.Infof("QuotaMonitor stopping")
+	klog.Infof("QuotaMonitor running")
+	defer klog.Infof("QuotaMonitor stopping")
 
 	// Set up the stop channel.
 	qm.monitorLock.Lock()
@@ -323,7 +323,7 @@ func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
 			close(monitor.stopCh)
 		}
 	}
-	glog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
+	klog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
 }
 
 func (qm *QuotaMonitor) runProcessResourceChanges() {
@@ -349,7 +349,7 @@ func (qm *QuotaMonitor) processResourceChanges() bool {
 		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
 		return true
 	}
-	glog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
+	klog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
 	qm.replenishmentFunc(event.gvr.GroupResource(), accessor.GetNamespace())
 	return true
 }
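Across all four files the logging change is mechanical: each glog call site becomes a klog call site with an identical signature. A minimal standalone sketch of the migrated call surface:

package main

import "k8s.io/klog"

func main() {
	// klog preserves glog's API, so this commit only swaps the import and
	// the identifier at each call site; verbosity gating works unchanged.
	klog.InitFlags(nil) // registers -v and the other logging flags
	klog.Infof("QuotaMonitor running")
	klog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", 1, 2, 0)
	klog.Flush()
}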