mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00

Commit message: vendor files
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/BUILD (generated, vendored, new file, 81 lines)
@@ -0,0 +1,81 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "resource_quota_controller.go",
        "resource_quota_monitor.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/resourcequota",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/v1:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/quota:go_default_library",
        "//pkg/quota/evaluator/core:go_default_library",
        "//pkg/quota/generic:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["resource_quota_controller_test.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/resourcequota",
    library = ":go_default_library",
    deps = [
        "//pkg/controller:go_default_library",
        "//pkg/quota:go_default_library",
        "//pkg/quota/generic:go_default_library",
        "//pkg/quota/install:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/OWNERS (generated, vendored, new executable file, 3 lines)
@@ -0,0 +1,3 @@
reviewers:
- derekwaynecarr
- deads2k
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/doc.go (generated, vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// resourcequota contains a controller that makes resource quota usage observations
package resourcequota // import "k8s.io/kubernetes/pkg/controller/resourcequota"
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller.go (generated, vendored, new file, 478 lines)
@@ -0,0 +1,478 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	api "k8s.io/kubernetes/pkg/apis/core"
	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/quota"
)

// NamespacedResourcesFunc knows how to discover namespaced resources.
type NamespacedResourcesFunc func() ([]*metav1.APIResourceList, error)

// ReplenishmentFunc is a signal that a resource changed in specified namespace
// that may require quota to be recalculated.
type ReplenishmentFunc func(groupResource schema.GroupResource, namespace string)

// InformerFactory is all the quota system needs to interface with informers.
type InformerFactory interface {
	ForResource(resource schema.GroupVersionResource) (informers.GenericInformer, error)
	Start(stopCh <-chan struct{})
}

// ResourceQuotaControllerOptions holds options for creating a quota controller
type ResourceQuotaControllerOptions struct {
	// Must have authority to list all quotas, and update quota status
	QuotaClient corev1client.ResourceQuotasGetter
	// Shared informer for resource quotas
	ResourceQuotaInformer coreinformers.ResourceQuotaInformer
	// Controls full recalculation of quota usage
	ResyncPeriod controller.ResyncPeriodFunc
	// Maintains evaluators that know how to calculate usage for group resource
	Registry quota.Registry
	// Discover list of supported resources on the server.
	DiscoveryFunc NamespacedResourcesFunc
	// A function that returns the list of resources to ignore
	IgnoredResourcesFunc func() map[schema.GroupResource]struct{}
	// InformersStarted knows if informers were started.
	InformersStarted <-chan struct{}
	// InformerFactory interfaces with informers.
	InformerFactory InformerFactory
	// Controls full resync of objects monitored for replenishment.
	ReplenishmentResyncPeriod controller.ResyncPeriodFunc
}

// ResourceQuotaController is responsible for tracking quota usage status in the system
type ResourceQuotaController struct {
	// Must have authority to list all resources in the system, and update quota status
	rqClient corev1client.ResourceQuotasGetter
	// A lister/getter of resource quota objects
	rqLister corelisters.ResourceQuotaLister
	// A list of functions that return true when their caches have synced
	informerSyncedFuncs []cache.InformerSynced
	// ResourceQuota objects that need to be synchronized
	queue workqueue.RateLimitingInterface
	// missingUsageQueue holds objects that are missing the initial usage information
	missingUsageQueue workqueue.RateLimitingInterface
	// To allow injection of syncUsage for testing.
	syncHandler func(key string) error
	// function that controls full recalculation of quota usage
	resyncPeriod controller.ResyncPeriodFunc
	// knows how to calculate usage
	registry quota.Registry
	// knows how to monitor all the resources tracked by quota and trigger replenishment
	quotaMonitor *QuotaMonitor
	// controls the workers that process quotas
	// this lock is acquired to control write access to the monitors and ensures that all
	// monitors are synced before the controller can process quotas.
	workerLock sync.RWMutex
}

// NewResourceQuotaController creates a quota controller with specified options
func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*ResourceQuotaController, error) {
	// build the resource quota controller
	rq := &ResourceQuotaController{
		rqClient:            options.QuotaClient,
		rqLister:            options.ResourceQuotaInformer.Lister(),
		informerSyncedFuncs: []cache.InformerSynced{options.ResourceQuotaInformer.Informer().HasSynced},
		queue:               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"),
		missingUsageQueue:   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
		resyncPeriod:        options.ResyncPeriod,
		registry:            options.Registry,
	}
	// set the synchronization handler
	rq.syncHandler = rq.syncResourceQuotaFromKey

	options.ResourceQuotaInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc: rq.addQuota,
			UpdateFunc: func(old, cur interface{}) {
				// We are only interested in observing updates to quota.spec to drive updates to quota.status.
				// We ignore all updates to quota.Status because they are all driven by this controller.
				// IMPORTANT:
				// We do not use this function to queue up a full quota recalculation. To do so, would require
				// us to enqueue all quota.Status updates, and since quota.Status updates involve additional queries
				// that cannot be backed by a cache and result in a full query of a namespace's content, we do not
				// want to pay the price on spurious status updates. As a result, we have a separate routine that is
				// responsible for enqueue of all resource quotas when doing a full resync (enqueueAll)
				oldResourceQuota := old.(*v1.ResourceQuota)
				curResourceQuota := cur.(*v1.ResourceQuota)
				if quota.V1Equals(oldResourceQuota.Spec.Hard, curResourceQuota.Spec.Hard) {
					return
				}
				rq.addQuota(curResourceQuota)
			},
			// This will enter the sync loop and no-op, because the controller has been deleted from the store.
			// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
			// way of achieving this is by performing a `stop` operation on the controller.
			DeleteFunc: rq.enqueueResourceQuota,
		},
		rq.resyncPeriod(),
	)

	qm := &QuotaMonitor{
		informersStarted:  options.InformersStarted,
		informerFactory:   options.InformerFactory,
		ignoredResources:  options.IgnoredResourcesFunc(),
		resourceChanges:   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resource_quota_controller_resource_changes"),
		resyncPeriod:      options.ReplenishmentResyncPeriod,
		replenishmentFunc: rq.replenishQuota,
		registry:          rq.registry,
	}
	rq.quotaMonitor = qm

	// do initial quota monitor setup
	resources, err := GetQuotableResources(options.DiscoveryFunc)
	if err != nil {
		return nil, err
	}
	if err = qm.syncMonitors(resources); err != nil {
		utilruntime.HandleError(fmt.Errorf("initial monitor sync has error: %v", err))
	}

	// only start quota once all informers synced
	rq.informerSyncedFuncs = append(rq.informerSyncedFuncs, qm.IsSynced)

	return rq, nil
}

// enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
func (rq *ResourceQuotaController) enqueueAll() {
	defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
	rqs, err := rq.rqLister.List(labels.Everything())
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("unable to enqueue all - error listing resource quotas: %v", err))
		return
	}
	for i := range rqs {
		key, err := controller.KeyFunc(rqs[i])
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", rqs[i], err))
			continue
		}
		rq.queue.Add(key)
	}
}

// obj could be an *v1.ResourceQuota, or a DeletionFinalStateUnknown marker item.
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}
	rq.queue.Add(key)
}

func (rq *ResourceQuotaController) addQuota(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
		return
	}

	resourceQuota := obj.(*v1.ResourceQuota)

	// if we declared an intent that is not yet captured in status (prioritize it)
	if !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) {
		rq.missingUsageQueue.Add(key)
		return
	}

	// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
	for constraint := range resourceQuota.Status.Hard {
		if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
			matchedResources := []api.ResourceName{api.ResourceName(constraint)}
			for _, evaluator := range rq.registry.List() {
				if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 {
					rq.missingUsageQueue.Add(key)
					return
				}
			}
		}
	}

	// no special priority, go in normal recalc queue
	rq.queue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface) func() {
	workFunc := func() bool {

		rq.workerLock.RLock()
		defer rq.workerLock.RUnlock()

		key, quit := queue.Get()
		if quit {
			return true
		}
		defer queue.Done(key)
		err := rq.syncHandler(key.(string))
		if err == nil {
			queue.Forget(key)
			return false
		}
		utilruntime.HandleError(err)
		queue.AddRateLimited(key)
		return false
	}

	return func() {
		for {
			if quit := workFunc(); quit {
				glog.Infof("resource quota controller worker shutting down")
				return
			}
		}
	}
}

// Run begins quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer rq.queue.ShutDown()

	glog.Infof("Starting resource quota controller")
	defer glog.Infof("Shutting down resource quota controller")

	go rq.quotaMonitor.Run(stopCh)

	if !controller.WaitForCacheSync("resource quota", stopCh, rq.informerSyncedFuncs...) {
		return
	}

	// the workers that chug through the quota calculation backlog
	for i := 0; i < workers; i++ {
		go wait.Until(rq.worker(rq.queue), time.Second, stopCh)
		go wait.Until(rq.worker(rq.missingUsageQueue), time.Second, stopCh)
	}
	// the timer for how often we do a full recalculation across all quotas
	go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
}

// syncResourceQuotaFromKey syncs a quota key
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
	startTime := time.Now()
	defer func() {
		glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
	if errors.IsNotFound(err) {
		glog.Infof("Resource quota has been deleted %v", key)
		return nil
	}
	if err != nil {
		glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
		rq.queue.Add(key)
		return err
	}
	return rq.syncResourceQuota(quota)
}

// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.ResourceQuota) (err error) {
	// quota is dirty if any part of spec hard limits differs from the status hard limits
	dirty := !apiequality.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)

	resourceQuota := api.ResourceQuota{}
	if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil {
		return err
	}

	// dirty tracks if the usage status differs from the previous sync,
	// if so, we send a new usage with latest status
	// if this is our first sync, it will be dirty by default, since we need to track usage
	dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)

	used := api.ResourceList{}
	if resourceQuota.Status.Used != nil {
		used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
	}
	hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)

	newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry)
	if err != nil {
		return err
	}
	for key, value := range newUsage {
		used[key] = value
	}

	// ensure set of used values match those that have hard constraints
	hardResources := quota.ResourceNames(hardLimits)
	used = quota.Mask(used, hardResources)

	// Create a usage object that is based on the quota resource version that will handle updates
	// by default, we preserve the past usage observation, and set hard to the current spec
	usage := api.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{
			Name:            resourceQuota.Name,
			Namespace:       resourceQuota.Namespace,
			ResourceVersion: resourceQuota.ResourceVersion,
			Labels:          resourceQuota.Labels,
			Annotations:     resourceQuota.Annotations},
		Status: api.ResourceQuotaStatus{
			Hard: hardLimits,
			Used: used,
		},
	}

	dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)

	// there was a change observed by this controller that requires we update quota
	if dirty {
		v1Usage := &v1.ResourceQuota{}
		if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
			return err
		}
		_, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
		return err
	}
	return nil
}

// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupResource, namespace string) {
	// check if the quota controller can evaluate this groupResource, if not, ignore it altogether...
	evaluator := rq.registry.Get(groupResource)
	if evaluator == nil {
		return
	}

	// check if this namespace even has a quota...
	resourceQuotas, err := rq.rqLister.ResourceQuotas(namespace).List(labels.Everything())
	if errors.IsNotFound(err) {
		utilruntime.HandleError(fmt.Errorf("quota controller could not find ResourceQuota associated with namespace: %s, could take up to %v before a quota replenishes", namespace, rq.resyncPeriod()))
		return
	}
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("error checking to see if namespace %s has any ResourceQuota associated with it: %v", namespace, err))
		return
	}
	if len(resourceQuotas) == 0 {
		return
	}

	// only queue those quotas that are tracking a resource associated with this kind.
	for i := range resourceQuotas {
		resourceQuota := resourceQuotas[i]
		internalResourceQuota := &api.ResourceQuota{}
		if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil {
			glog.Error(err)
			continue
		}
		resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
		if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 {
			// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
			rq.enqueueResourceQuota(resourceQuota)
		}
	}
}

// Sync periodically resyncs the controller when new resources are observed from discovery.
func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, period time.Duration, stopCh <-chan struct{}) {
	// Something has changed, so track the new state and perform a sync.
	oldResources := make(map[schema.GroupVersionResource]struct{})
	wait.Until(func() {
		// Get the current resource list from discovery.
		newResources, err := GetQuotableResources(discoveryFunc)
		if err != nil {
			utilruntime.HandleError(err)
			return
		}

		// Decide whether discovery has reported a change.
		if reflect.DeepEqual(oldResources, newResources) {
			glog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
			return
		}

		// Something has changed, so track the new state and perform a sync.
		glog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
		oldResources = newResources

		// Ensure workers are paused to avoid processing events before informers
		// have resynced.
		rq.workerLock.Lock()
		defer rq.workerLock.Unlock()

		// Perform the monitor resync and wait for controllers to report cache sync.
		if err := rq.resyncMonitors(newResources); err != nil {
			utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err))
			return
		}
		if !controller.WaitForCacheSync("resource quota", stopCh, rq.quotaMonitor.IsSynced) {
			utilruntime.HandleError(fmt.Errorf("timed out waiting for quota monitor sync"))
		}
	}, period, stopCh)
}

// resyncMonitors starts or stops quota monitors as needed to ensure that all
// (and only) those resources present in the map are monitored.
func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
	if err := rq.quotaMonitor.syncMonitors(resources); err != nil {
		return err
	}
	rq.quotaMonitor.startMonitors()
	return nil
}

// GetQuotableResources returns all resources that the quota system should recognize.
// It requires a resource supports the following verbs: 'create','list','delete'
func GetQuotableResources(discoveryFunc NamespacedResourcesFunc) (map[schema.GroupVersionResource]struct{}, error) {
	possibleResources, err := discoveryFunc()
	if err != nil {
		return nil, fmt.Errorf("failed to discover resources: %v", err)
	}
	quotableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"create", "list", "delete"}}, possibleResources)
	quotableGroupVersionResources, err := discovery.GroupVersionResources(quotableResources)
	if err != nil {
		return nil, fmt.Errorf("Failed to parse resources: %v", err)
	}
	return quotableGroupVersionResources, nil
}
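For orientation, the sketch below shows one way the options and entry points defined in resource_quota_controller.go above are typically wired together by a controller manager. It is a minimal, hypothetical example and not part of this commit: the clientset, shared informer factory, informersStarted channel, stop channel, worker count, resync periods, and the package/function names are all assumed for illustration; only the types and functions it calls come from the vendored code above and its client-go/quota dependencies.

```go
// Hypothetical wiring sketch -- not part of this commit.
package app

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller"
	resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota"
	"k8s.io/kubernetes/pkg/quota/generic"
	quotainstall "k8s.io/kubernetes/pkg/quota/install"
)

// startResourceQuotaController assumes the caller supplies a clientset, a shared
// informer factory, an informersStarted channel, and a stop channel.
func startResourceQuotaController(client kubernetes.Interface, informerFactory informers.SharedInformerFactory, informersStarted <-chan struct{}, stopCh <-chan struct{}) error {
	// Discovery of namespaced resources drives which monitors get created.
	discoveryFunc := client.Discovery().ServerPreferredNamespacedResources

	// Quota configuration supplies the evaluators and the ignore list.
	listerFunc := generic.ListerFuncForResourceFunc(informerFactory.ForResource)
	quotaConfiguration := quotainstall.NewQuotaConfigurationForControllers(listerFunc)

	options := &resourcequotacontroller.ResourceQuotaControllerOptions{
		QuotaClient:               client.CoreV1(),
		ResourceQuotaInformer:     informerFactory.Core().V1().ResourceQuotas(),
		ResyncPeriod:              controller.StaticResyncPeriodFunc(5 * time.Minute),
		ReplenishmentResyncPeriod: controller.StaticResyncPeriodFunc(12 * time.Hour),
		DiscoveryFunc:             discoveryFunc,
		IgnoredResourcesFunc:      quotaConfiguration.IgnoredResources,
		InformersStarted:          informersStarted,
		InformerFactory:           informerFactory,
		Registry:                  generic.NewRegistry(quotaConfiguration.Evaluators()),
	}

	rq, err := resourcequotacontroller.NewResourceQuotaController(options)
	if err != nil {
		return err
	}
	go rq.Run(5, stopCh)                              // workers drain the primary and missing-usage queues
	go rq.Sync(discoveryFunc, 30*time.Second, stopCh) // periodic re-discovery of quotable resources
	return nil
}
```

The test file that follows exercises the same wiring against a fake clientset, which is why its setupQuotaController helper mirrors this shape.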
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_controller_test.go (generated, vendored, new file, 434 lines)
@@ -0,0 +1,434 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"fmt"
	"strings"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/quota"
	"k8s.io/kubernetes/pkg/quota/generic"
	"k8s.io/kubernetes/pkg/quota/install"
)

func getResourceList(cpu, memory string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}

func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
	res := v1.ResourceRequirements{}
	res.Requests = requests
	res.Limits = limits
	return res
}

func mockDiscoveryFunc() ([]*metav1.APIResourceList, error) {
	return []*metav1.APIResourceList{}, nil
}

func mockListerForResourceFunc(listersForResource map[schema.GroupVersionResource]cache.GenericLister) quota.ListerForResourceFunc {
	return func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
		lister, found := listersForResource[gvr]
		if !found {
			return nil, fmt.Errorf("no lister found for resource")
		}
		return lister, nil
	}
}

func newGenericLister(groupResource schema.GroupResource, items []runtime.Object) cache.GenericLister {
	store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
	for _, item := range items {
		store.Add(item)
	}
	return cache.NewGenericLister(store, groupResource)
}

type quotaController struct {
	*ResourceQuotaController
	stop chan struct{}
}

func setupQuotaController(t *testing.T, kubeClient kubernetes.Interface, lister quota.ListerForResourceFunc) quotaController {
	informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
	quotaConfiguration := install.NewQuotaConfigurationForControllers(lister)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
		QuotaClient:               kubeClient.Core(),
		ResourceQuotaInformer:     informerFactory.Core().V1().ResourceQuotas(),
		ResyncPeriod:              controller.NoResyncPeriodFunc,
		ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
		IgnoredResourcesFunc:      quotaConfiguration.IgnoredResources,
		DiscoveryFunc:             mockDiscoveryFunc,
		Registry:                  generic.NewRegistry(quotaConfiguration.Evaluators()),
		InformersStarted:          alwaysStarted,
	}
	qc, err := NewResourceQuotaController(resourceQuotaControllerOptions)
	if err != nil {
		t.Fatal(err)
	}
	stop := make(chan struct{})
	go informerFactory.Start(stop)
	return quotaController{qc, stop}
}

func newTestPods() []runtime.Object {
	return []runtime.Object{
		&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
			Status:     v1.PodStatus{Phase: v1.PodRunning},
			Spec: v1.PodSpec{
				Volumes:    []v1.Volume{{Name: "vol"}},
				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
			},
		},
		&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
			Status:     v1.PodStatus{Phase: v1.PodRunning},
			Spec: v1.PodSpec{
				Volumes:    []v1.Volume{{Name: "vol"}},
				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
			},
		},
		&v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
			Status:     v1.PodStatus{Phase: v1.PodFailed},
			Spec: v1.PodSpec{
				Volumes:    []v1.Volume{{Name: "vol"}},
				Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
			},
		},
	}
}

func TestSyncResourceQuota(t *testing.T) {
	testCases := map[string]struct {
		gvr               schema.GroupVersionResource
		items             []runtime.Object
		quota             v1.ResourceQuota
		status            v1.ResourceQuotaStatus
		expectedActionSet sets.String
	}{
		"pods": {
			gvr: v1.SchemeGroupVersion.WithResource("pods"),
			quota: v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "testing"},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("3"),
						v1.ResourceMemory: resource.MustParse("100Gi"),
						v1.ResourcePods:   resource.MustParse("5"),
					},
				},
			},
			status: v1.ResourceQuotaStatus{
				Hard: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("3"),
					v1.ResourceMemory: resource.MustParse("100Gi"),
					v1.ResourcePods:   resource.MustParse("5"),
				},
				Used: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("200m"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
					v1.ResourcePods:   resource.MustParse("2"),
				},
			},
			expectedActionSet: sets.NewString(
				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
			),
			items: newTestPods(),
		},
		"quota-spec-hard-updated": {
			gvr: v1.SchemeGroupVersion.WithResource("pods"),
			quota: v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
				Status: v1.ResourceQuotaStatus{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("3"),
					},
					Used: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("0"),
					},
				},
			},
			status: v1.ResourceQuotaStatus{
				Hard: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("4"),
				},
				Used: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("0"),
				},
			},
			expectedActionSet: sets.NewString(
				strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
			),
			items: []runtime.Object{},
		},
		"quota-unchanged": {
			gvr: v1.SchemeGroupVersion.WithResource("pods"),
			quota: v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
				Status: v1.ResourceQuotaStatus{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("0"),
					},
				},
			},
			status: v1.ResourceQuotaStatus{
				Hard: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("4"),
				},
				Used: v1.ResourceList{
					v1.ResourceCPU: resource.MustParse("0"),
				},
			},
			expectedActionSet: sets.NewString(),
			items:             []runtime.Object{},
		},
	}

	for testName, testCase := range testCases {
		kubeClient := fake.NewSimpleClientset(&testCase.quota)
		listersForResourceConfig := map[schema.GroupVersionResource]cache.GenericLister{
			testCase.gvr: newGenericLister(testCase.gvr.GroupResource(), testCase.items),
		}
		qc := setupQuotaController(t, kubeClient, mockListerForResourceFunc(listersForResourceConfig))
		defer close(qc.stop)

		if err := qc.syncResourceQuota(&testCase.quota); err != nil {
			t.Fatalf("test: %s, unexpected error: %v", testName, err)
		}

		actionSet := sets.NewString()
		for _, action := range kubeClient.Actions() {
			actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
		}
		if !actionSet.HasAll(testCase.expectedActionSet.List()...) {
			t.Errorf("test: %s,\nExpected actions:\n%v\n but got:\n%v\nDifference:\n%v", testName, testCase.expectedActionSet, actionSet, testCase.expectedActionSet.Difference(actionSet))
		}

		lastActionIndex := len(kubeClient.Actions()) - 1
		usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)

		// ensure usage is as expected
		if len(usage.Status.Hard) != len(testCase.status.Hard) {
			t.Errorf("test: %s, status hard lengths do not match", testName)
		}
		if len(usage.Status.Used) != len(testCase.status.Used) {
			t.Errorf("test: %s, status used lengths do not match", testName)
		}
		for k, v := range testCase.status.Hard {
			actual := usage.Status.Hard[k]
			actualValue := actual.String()
			expectedValue := v.String()
			if expectedValue != actualValue {
				t.Errorf("test: %s, Usage Hard: Key: %v, Expected: %v, Actual: %v", testName, k, expectedValue, actualValue)
			}
		}
		for k, v := range testCase.status.Used {
			actual := usage.Status.Used[k]
			actualValue := actual.String()
			expectedValue := v.String()
			if expectedValue != actualValue {
				t.Errorf("test: %s, Usage Used: Key: %v, Expected: %v, Actual: %v", testName, k, expectedValue, actualValue)
			}
		}
	}
}

func TestAddQuota(t *testing.T) {
	kubeClient := fake.NewSimpleClientset()
	gvr := v1.SchemeGroupVersion.WithResource("pods")
	listersForResourceConfig := map[schema.GroupVersionResource]cache.GenericLister{
		gvr: newGenericLister(gvr.GroupResource(), newTestPods()),
	}

	qc := setupQuotaController(t, kubeClient, mockListerForResourceFunc(listersForResourceConfig))
	defer close(qc.stop)

	testCases := []struct {
		name             string
		quota            *v1.ResourceQuota
		expectedPriority bool
	}{
		{
			name:             "no status",
			expectedPriority: true,
			quota: &v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
			},
		},
		{
			name:             "status, no usage",
			expectedPriority: true,
			quota: &v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
				Status: v1.ResourceQuotaStatus{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
			},
		},
		{
			name:             "status, mismatch",
			expectedPriority: true,
			quota: &v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
				Status: v1.ResourceQuotaStatus{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("6"),
					},
					Used: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("0"),
					},
				},
			},
		},
		{
			name:             "status, missing usage, but don't care (no informer)",
			expectedPriority: false,
			quota: &v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						"count/foobars.example.com": resource.MustParse("4"),
					},
				},
				Status: v1.ResourceQuotaStatus{
					Hard: v1.ResourceList{
						"count/foobars.example.com": resource.MustParse("4"),
					},
				},
			},
		},
		{
			name:             "ready",
			expectedPriority: false,
			quota: &v1.ResourceQuota{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					Name:      "rq",
				},
				Spec: v1.ResourceQuotaSpec{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
				},
				Status: v1.ResourceQuotaStatus{
					Hard: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("4"),
					},
					Used: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("0"),
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		qc.addQuota(tc.quota)
		if tc.expectedPriority {
			if e, a := 1, qc.missingUsageQueue.Len(); e != a {
				t.Errorf("%s: expected %v, got %v", tc.name, e, a)
			}
			if e, a := 0, qc.queue.Len(); e != a {
				t.Errorf("%s: expected %v, got %v", tc.name, e, a)
			}
		} else {
			if e, a := 0, qc.missingUsageQueue.Len(); e != a {
				t.Errorf("%s: expected %v, got %v", tc.name, e, a)
			}
			if e, a := 1, qc.queue.Len(); e != a {
				t.Errorf("%s: expected %v, got %v", tc.name, e, a)
			}
		}
		for qc.missingUsageQueue.Len() > 0 {
			key, _ := qc.missingUsageQueue.Get()
			qc.missingUsageQueue.Done(key)
		}
		for qc.queue.Len() > 0 {
			key, _ := qc.queue.Get()
			qc.queue.Done(key)
		}
	}
}
vendor/k8s.io/kubernetes/pkg/controller/resourcequota/resource_quota_monitor.go (generated, vendored, new file, 341 lines)
@@ -0,0 +1,341 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequota

import (
	"fmt"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/clock"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/quota"
	"k8s.io/kubernetes/pkg/quota/evaluator/core"
	"k8s.io/kubernetes/pkg/quota/generic"
)

type eventType int

func (e eventType) String() string {
	switch e {
	case addEvent:
		return "add"
	case updateEvent:
		return "update"
	case deleteEvent:
		return "delete"
	default:
		return fmt.Sprintf("unknown(%d)", int(e))
	}
}

const (
	addEvent eventType = iota
	updateEvent
	deleteEvent
)

type event struct {
	eventType eventType
	obj       interface{}
	oldObj    interface{}
	gvr       schema.GroupVersionResource
}

type QuotaMonitor struct {
	// each monitor list/watches a resource and determines if we should replenish quota
	monitors    monitors
	monitorLock sync.Mutex
	// informersStarted is closed after all of the controllers have been initialized and are running.
	// After that it is safe to start them here, before that it is not.
	informersStarted <-chan struct{}

	// stopCh drives shutdown. If it is nil, it indicates that Run() has not been
	// called yet. If it is non-nil, then when closed it indicates everything
	// should shut down.
	//
	// This channel is also protected by monitorLock.
	stopCh <-chan struct{}

	// monitors are the producer of the resourceChanges queue
	resourceChanges workqueue.RateLimitingInterface

	// interfaces with informers
	informerFactory InformerFactory

	// list of resources to ignore
	ignoredResources map[schema.GroupResource]struct{}

	// The period that should be used to re-sync the monitored resource
	resyncPeriod controller.ResyncPeriodFunc

	// callback to alert that a change may require quota recalculation
	replenishmentFunc ReplenishmentFunc

	// maintains list of evaluators
	registry quota.Registry
}

// monitor runs a Controller with a local stop channel.
type monitor struct {
	controller cache.Controller

	// stopCh stops Controller. If stopCh is nil, the monitor is considered to be
	// not yet started.
	stopCh chan struct{}
}

// Run is intended to be called in a goroutine. Multiple calls of this is an
// error.
func (m *monitor) Run() {
	m.controller.Run(m.stopCh)
}

type monitors map[schema.GroupVersionResource]*monitor

func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cache.Controller, error) {
	// TODO: pass this down
	clock := clock.RealClock{}
	handlers := cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) {
			// TODO: leaky abstraction! live w/ it for now, but should pass down an update filter func.
			// we only want to queue the updates we care about though as too much noise will overwhelm queue.
			notifyUpdate := false
			switch resource.GroupResource() {
			case schema.GroupResource{Resource: "pods"}:
				oldPod := oldObj.(*v1.Pod)
				newPod := newObj.(*v1.Pod)
				notifyUpdate = core.QuotaV1Pod(oldPod, clock) && !core.QuotaV1Pod(newPod, clock)
			case schema.GroupResource{Resource: "services"}:
				oldService := oldObj.(*v1.Service)
				newService := newObj.(*v1.Service)
				notifyUpdate = core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService)
			}
			if notifyUpdate {
				event := &event{
					eventType: updateEvent,
					obj:       newObj,
					oldObj:    oldObj,
					gvr:       resource,
				}
				qm.resourceChanges.Add(event)
			}
		},
		DeleteFunc: func(obj interface{}) {
			// delta fifo may wrap the object in a cache.DeletedFinalStateUnknown, unwrap it
			if deletedFinalStateUnknown, ok := obj.(cache.DeletedFinalStateUnknown); ok {
				obj = deletedFinalStateUnknown.Obj
			}
			event := &event{
				eventType: deleteEvent,
				obj:       obj,
				gvr:       resource,
			}
			qm.resourceChanges.Add(event)
		},
	}
	shared, err := qm.informerFactory.ForResource(resource)
	if err == nil {
		glog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
		shared.Informer().AddEventHandlerWithResyncPeriod(handlers, qm.resyncPeriod())
		return shared.Informer().GetController(), nil
	}
	glog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)

	// TODO: if we can share storage with garbage collector, it may make sense to support other resources
	// until that time, aggregated api servers will have to run their own controller to reconcile their own quota.
	return nil, fmt.Errorf("unable to monitor quota for resource %q", resource.String())
}

// syncMonitors rebuilds the monitor set according to the supplied resources,
// creating or deleting monitors as necessary. It will return any error
// encountered, but will make an attempt to create a monitor for each resource
// instead of immediately exiting on an error. It may be called before or after
// Run. Monitors are NOT started as part of the sync. To ensure all existing
// monitors are started, call startMonitors.
func (qm *QuotaMonitor) syncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
	qm.monitorLock.Lock()
	defer qm.monitorLock.Unlock()

	toRemove := qm.monitors
	if toRemove == nil {
		toRemove = monitors{}
	}
	current := monitors{}
	errs := []error{}
	kept := 0
	added := 0
	for resource := range resources {
		if _, ok := qm.ignoredResources[resource.GroupResource()]; ok {
			continue
		}
		if m, ok := toRemove[resource]; ok {
			current[resource] = m
			delete(toRemove, resource)
			kept++
			continue
		}
		c, err := qm.controllerFor(resource)
		if err != nil {
			errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err))
			continue
		}

		// check if we need to create an evaluator for this resource (if none previously registered)
		evaluator := qm.registry.Get(resource.GroupResource())
		if evaluator == nil {
			listerFunc := generic.ListerFuncForResourceFunc(qm.informerFactory.ForResource)
			listResourceFunc := generic.ListResourceUsingListerFunc(listerFunc, resource)
			evaluator = generic.NewObjectCountEvaluator(false, resource.GroupResource(), listResourceFunc, "")
			qm.registry.Add(evaluator)
			glog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
		}

		// track the monitor
		current[resource] = &monitor{controller: c}
		added++
	}
	qm.monitors = current

	for _, monitor := range toRemove {
		if monitor.stopCh != nil {
			close(monitor.stopCh)
		}
	}

	glog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
	// NewAggregate returns nil if errs is 0-length
	return utilerrors.NewAggregate(errs)
}

// startMonitors ensures the current set of monitors are running. Any newly
// started monitors will also cause shared informers to be started.
//
// If called before Run, startMonitors does nothing (as there is no stop channel
// to support monitor/informer execution).
func (qm *QuotaMonitor) startMonitors() {
	qm.monitorLock.Lock()
	defer qm.monitorLock.Unlock()

	if qm.stopCh == nil {
		return
	}

	// we're waiting until after the informer start that happens once all the controllers are initialized. This ensures
	// that they don't get unexpected events on their work queues.
	<-qm.informersStarted

	monitors := qm.monitors
	started := 0
	for _, monitor := range monitors {
		if monitor.stopCh == nil {
			monitor.stopCh = make(chan struct{})
			qm.informerFactory.Start(qm.stopCh)
			go monitor.Run()
			started++
		}
	}
	glog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
}

// IsSynced returns true if any monitors exist AND all those monitors'
// controllers HasSynced functions return true. This means IsSynced could return
// true at one time, and then later return false if all monitors were
// reconstructed.
func (qm *QuotaMonitor) IsSynced() bool {
	qm.monitorLock.Lock()
	defer qm.monitorLock.Unlock()

	if len(qm.monitors) == 0 {
		return false
	}

	for _, monitor := range qm.monitors {
		if !monitor.controller.HasSynced() {
			return false
		}
	}
	return true
}

// Run sets the stop channel and starts monitor execution until stopCh is
// closed. Any running monitors will be stopped before Run returns.
func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
	glog.Infof("QuotaMonitor running")
	defer glog.Infof("QuotaMonitor stopping")

	// Set up the stop channel.
	qm.monitorLock.Lock()
	qm.stopCh = stopCh
	qm.monitorLock.Unlock()

	// Start monitors and begin change processing until the stop channel is
	// closed.
	qm.startMonitors()
	wait.Until(qm.runProcessResourceChanges, 1*time.Second, stopCh)

	// Stop any running monitors.
	qm.monitorLock.Lock()
	defer qm.monitorLock.Unlock()
	monitors := qm.monitors
	stopped := 0
	for _, monitor := range monitors {
		if monitor.stopCh != nil {
			stopped++
			close(monitor.stopCh)
		}
	}
	glog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
}

func (qm *QuotaMonitor) runProcessResourceChanges() {
	for qm.processResourceChanges() {
	}
}

// Dequeueing an event from resourceChanges to process
func (qm *QuotaMonitor) processResourceChanges() bool {
	item, quit := qm.resourceChanges.Get()
	if quit {
		return false
	}
	defer qm.resourceChanges.Done(item)
	event, ok := item.(*event)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect a *event, got %v", item))
		return true
	}
	obj := event.obj
	accessor, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
		return true
	}
	glog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
	qm.replenishmentFunc(event.gvr.GroupResource(), accessor.GetNamespace())
	return true
}
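The monitor's event flow ties back to the controller above: the update/delete handlers installed by controllerFor push an *event onto resourceChanges, processResourceChanges resolves the object's namespace, and replenishmentFunc (the controller's replenishQuota) re-enqueues any quota tracking that group resource. The snippet below is a minimal, hypothetical restatement of the pod update filter used in controllerFor; it is not part of this commit, the package and function names are illustrative, and it only calls functions that already appear in the vendored code.

```go
// Hypothetical illustration -- not part of this commit. For pods, an update only
// feeds the resourceChanges queue when the pod stops counting against quota,
// e.g. a transition from Running to Succeeded or Failed.
package app

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/kubernetes/pkg/quota/evaluator/core"
)

func podUpdateTriggersReplenishment(oldPod, newPod *v1.Pod) bool {
	c := clock.RealClock{}
	// core.QuotaV1Pod reports whether the pod still counts against quota.
	return core.QuotaV1Pod(oldPod, c) && !core.QuotaV1Pod(newPod, c)
}
```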