mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 10:33:35 +00:00)
vendor files
vendor/k8s.io/kubernetes/pkg/controller/service/BUILD (generated, vendored, new file, 68 lines)
@@ -0,0 +1,68 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "service_controller.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/service",
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/util/metrics:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["service_controller_test.go"],
    importpath = "k8s.io/kubernetes/pkg/controller/service",
    library = ":go_default_library",
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/cloudprovider/providers/fake:go_default_library",
        "//pkg/controller:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
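For orientation (nothing in this commit depends on it yet): inside a Bazel workspace like this one, a consumer references the vendored library by its label rather than by Go import path alone. A minimal sketch, where the //pkg/foo package and foo.go source are hypothetical:

go_library(
    name = "go_default_library",
    srcs = ["foo.go"],  # hypothetical consumer source
    importpath = "k8s.io/kubernetes/pkg/foo",
    deps = [
        # label of the vendored service controller library above
        "//vendor/k8s.io/kubernetes/pkg/controller/service:go_default_library",
    ],
)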
vendor/k8s.io/kubernetes/pkg/controller/service/OWNERS (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
reviewers:
- bowei
- MrHohn
- thockin
- matchstick
approvers:
- bowei
- MrHohn
- thockin
- matchstick
vendor/k8s.io/kubernetes/pkg/controller/service/doc.go (generated, vendored, new file, 19 lines)
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package service contains code for syncing cloud load balancers
// with the service registry.
package service // import "k8s.io/kubernetes/pkg/controller/service"
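The package comment above is effectively the public contract: construct with New, drive with Run. A minimal wiring sketch (not part of this commit) under the APIs vendored here, using the fake cloud provider and fake clientset that the test file below also uses; the cluster name and worker count are arbitrary choices:

package main

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	servicecontroller "k8s.io/kubernetes/pkg/controller/service"
)

func main() {
	// A fake cloud provider and clientset stand in for real ones here.
	cloud := &fakecloud.FakeCloud{}
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0)

	// New fails if the provider does not expose LoadBalancer() support.
	sc, err := servicecontroller.New(
		cloud,
		client,
		factory.Core().V1().Services(),
		factory.Core().V1().Nodes(),
		"example-cluster", // clusterName
	)
	if err != nil {
		panic(err)
	}

	stopCh := make(chan struct{})
	factory.Start(stopCh)
	sc.Run(stopCh, 5) // five workers; blocks until stopCh is closed elsewhere
}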
vendor/k8s.io/kubernetes/pkg/controller/service/service_controller.go (generated, vendored, new file, 818 lines)
@@ -0,0 +1,818 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package service

import (
	"fmt"
	"sort"
	"sync"
	"time"

	"reflect"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller"
	kubefeatures "k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/util/metrics"
)

const (
	// Interval of synchronizing service status from apiserver
	serviceSyncPeriod = 30 * time.Second
	// Interval of synchronizing node status from apiserver
	nodeSyncPeriod = 100 * time.Second

	// How long to wait before retrying the processing of a service change.
	// If this changes, the sleep in hack/jenkins/e2e.sh before downing a cluster
	// should be changed appropriately.
	minRetryDelay = 5 * time.Second
	maxRetryDelay = 300 * time.Second

	clientRetryCount    = 5
	clientRetryInterval = 5 * time.Second

	retryable    = true
	notRetryable = false

	doNotRetry = time.Duration(0)

	// LabelNodeRoleMaster specifies that a node is a master.
	// It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
	LabelNodeRoleMaster = "node-role.kubernetes.io/master"

	// LabelNodeRoleExcludeBalancer specifies that the node should be
	// excluded from load balancers created by a cloud provider.
	LabelNodeRoleExcludeBalancer = "alpha.service-controller.kubernetes.io/exclude-balancer"
)

type cachedService struct {
	// The cached state of the service
	state *v1.Service
	// Controls error back-off
	lastRetryDelay time.Duration
}

type serviceCache struct {
	mu         sync.Mutex // protects serviceMap
	serviceMap map[string]*cachedService
}

type ServiceController struct {
	cloud               cloudprovider.Interface
	knownHosts          []*v1.Node
	servicesToUpdate    []*v1.Service
	kubeClient          clientset.Interface
	clusterName         string
	balancer            cloudprovider.LoadBalancer
	cache               *serviceCache
	serviceLister       corelisters.ServiceLister
	serviceListerSynced cache.InformerSynced
	eventBroadcaster    record.EventBroadcaster
	eventRecorder       record.EventRecorder
	nodeLister          corelisters.NodeLister
	nodeListerSynced    cache.InformerSynced
	// services that need to be synced
	workingQueue workqueue.DelayingInterface
}

// New returns a new service controller to keep cloud provider service resources
// (like load balancers) in sync with the registry.
func New(
	cloud cloudprovider.Interface,
	kubeClient clientset.Interface,
	serviceInformer coreinformers.ServiceInformer,
	nodeInformer coreinformers.NodeInformer,
	clusterName string,
) (*ServiceController, error) {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})

	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
		if err := metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
			return nil, err
		}
	}

	s := &ServiceController{
		cloud:            cloud,
		knownHosts:       []*v1.Node{},
		kubeClient:       kubeClient,
		clusterName:      clusterName,
		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
		eventBroadcaster: broadcaster,
		eventRecorder:    recorder,
		nodeLister:       nodeInformer.Lister(),
		nodeListerSynced: nodeInformer.Informer().HasSynced,
		workingQueue:     workqueue.NewNamedDelayingQueue("service"),
	}

	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
		cache.ResourceEventHandlerFuncs{
			AddFunc: s.enqueueService,
			UpdateFunc: func(old, cur interface{}) {
				oldSvc, ok1 := old.(*v1.Service)
				curSvc, ok2 := cur.(*v1.Service)
				if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
					s.enqueueService(cur)
				}
			},
			DeleteFunc: s.enqueueService,
		},
		serviceSyncPeriod,
	)
	s.serviceLister = serviceInformer.Lister()
	s.serviceListerSynced = serviceInformer.Informer().HasSynced

	if err := s.init(); err != nil {
		return nil, err
	}
	return s, nil
}

// obj could be an *v1.Service, or a DeletionFinalStateUnknown marker item.
func (s *ServiceController) enqueueService(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		glog.Errorf("Couldn't get key for object %#v: %v", obj, err)
		return
	}
	s.workingQueue.Add(key)
}

// Run starts a background goroutine that watches for changes to services that
// have (or had) LoadBalancers=true and ensures that they have
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if load balancers need to be updated to point to a new set.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(stopCh <-chan struct{}, workers int) {
	defer runtime.HandleCrash()
	defer s.workingQueue.ShutDown()

	glog.Info("Starting service controller")
	defer glog.Info("Shutting down service controller")

	if !controller.WaitForCacheSync("service", stopCh, s.serviceListerSynced, s.nodeListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(s.worker, time.Second, stopCh)
	}

	go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, stopCh)

	<-stopCh
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (s *ServiceController) worker() {
	for {
		func() {
			key, quit := s.workingQueue.Get()
			if quit {
				return
			}
			defer s.workingQueue.Done(key)
			err := s.syncService(key.(string))
			if err != nil {
				glog.Errorf("Error syncing service: %v", err)
			}
		}()
	}
}

func (s *ServiceController) init() error {
	if s.cloud == nil {
		return fmt.Errorf("WARNING: no cloud provider provided, services of type LoadBalancer will fail")
	}

	balancer, ok := s.cloud.LoadBalancer()
	if !ok {
		return fmt.Errorf("the cloud provider does not support external load balancers")
	}
	s.balancer = balancer

	return nil
}

// Returns an error if processing the service update failed, along with a time.Duration
// indicating whether processing should be retried; zero means no-retry; otherwise
// we should retry after that Duration.
func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) (error, time.Duration) {
	if cachedService.state != nil {
		if cachedService.state.UID != service.UID {
			err, retry := s.processLoadBalancerDelete(cachedService, key)
			if err != nil {
				return err, retry
			}
		}
	}
	// Cache the service; we need the info for service deletion.
	cachedService.state = service
	err, retry := s.createLoadBalancerIfNeeded(key, service)
	if err != nil {
		message := "Error creating load balancer"
		var retryToReturn time.Duration
		if retry {
			message += " (will retry): "
			retryToReturn = cachedService.nextRetryDelay()
		} else {
			message += " (will not retry): "
			retryToReturn = doNotRetry
		}
		message += err.Error()
		s.eventRecorder.Event(service, v1.EventTypeWarning, "CreatingLoadBalancerFailed", message)
		return err, retryToReturn
	}
	// Always update the cache upon success.
	// NOTE: Since we update the cached service if and only if we successfully
	// processed it, a cached service being nil implies that it hasn't yet
	// been successfully processed.
	s.cache.set(key, cachedService)

	cachedService.resetRetryDelay()
	return nil, doNotRetry
}

// Returns whatever error occurred along with a boolean indicator of whether it
// should be retried.
func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.Service) (error, bool) {
	// Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create,
	// which may involve service interruption. Also, we would like user-friendly events.

	// Save the state so we can avoid a write if it doesn't change.
	previousState := v1helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
	var newState *v1.LoadBalancerStatus
	var err error

	if !wantsLoadBalancer(service) {
		_, exists, err := s.balancer.GetLoadBalancer(s.clusterName, service)
		if err != nil {
			return fmt.Errorf("error getting LB for service %s: %v", key, err), retryable
		}
		if exists {
			glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
			if err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service); err != nil {
				return err, retryable
			}
			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
		}

		newState = &v1.LoadBalancerStatus{}
	} else {
		glog.V(2).Infof("Ensuring LB for service %s", key)

		// TODO: We could do a dry-run here if we wanted to avoid the spurious cloud-calls & events when we restart

		s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuringLoadBalancer", "Ensuring load balancer")
		newState, err = s.ensureLoadBalancer(service)
		if err != nil {
			return fmt.Errorf("failed to ensure load balancer for service %s: %v", key, err), retryable
		}
		s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuredLoadBalancer", "Ensured load balancer")
	}

	// Write the state if changed.
	// TODO: Be careful here ... what if there were other changes to the service?
	if !v1helper.LoadBalancerStatusEqual(previousState, newState) {
		// Make a copy so we don't mutate the shared informer cache.
		service = service.DeepCopy()

		// Update the status on the copy.
		service.Status.LoadBalancer = *newState

		if err := s.persistUpdate(service); err != nil {
			return fmt.Errorf("failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
		}
	} else {
		glog.V(2).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key)
	}

	return nil, notRetryable
}

func (s *ServiceController) persistUpdate(service *v1.Service) error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		_, err = s.kubeClient.CoreV1().Services(service.Namespace).UpdateStatus(service)
		if err == nil {
			return nil
		}
		// If the object no longer exists, we don't want to recreate it. Just bail
		// out so that we can process the delete, which we should soon be receiving
		// if we haven't already.
		if errors.IsNotFound(err) {
			glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
				service.Namespace, service.Name, err)
			return nil
		}
		// TODO: Try to resolve the conflict if the change was unrelated to load
		// balancer status. For now, just pass it up the stack.
		if errors.IsConflict(err) {
			return fmt.Errorf("not persisting update to service '%s/%s' that has been changed since we received it: %v",
				service.Namespace, service.Name, err)
		}
		glog.Warningf("Failed to persist updated LoadBalancerStatus to service '%s/%s' after creating its load balancer: %v",
			service.Namespace, service.Name, err)
		time.Sleep(clientRetryInterval)
	}
	return err
}

func (s *ServiceController) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
	nodes, err := s.nodeLister.ListWithPredicate(getNodeConditionPredicate())
	if err != nil {
		return nil, err
	}

	// If there are no available nodes for the LoadBalancer service, make an EventTypeWarning event for it.
	if len(nodes) == 0 {
		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name)
	}

	// - Only one protocol supported per service
	// - Not all cloud providers support all protocols and the next step is expected to return
	//   an error for unsupported protocols
	return s.balancer.EnsureLoadBalancer(s.clusterName, service, nodes)
}

// ListKeys implements the interface required by DeltaFIFO to list the keys we
// already know about.
func (s *serviceCache) ListKeys() []string {
	s.mu.Lock()
	defer s.mu.Unlock()
	keys := make([]string, 0, len(s.serviceMap))
	for k := range s.serviceMap {
		keys = append(keys, k)
	}
	return keys
}

// GetByKey returns the value stored in the serviceMap under the given key.
func (s *serviceCache) GetByKey(key string) (interface{}, bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if v, ok := s.serviceMap[key]; ok {
		return v, true, nil
	}
	return nil, false, nil
}

// allServices returns a copy of the current set of cached services.
func (s *serviceCache) allServices() []*v1.Service {
	s.mu.Lock()
	defer s.mu.Unlock()
	services := make([]*v1.Service, 0, len(s.serviceMap))
	for _, v := range s.serviceMap {
		services = append(services, v.state)
	}
	return services
}

func (s *serviceCache) get(serviceName string) (*cachedService, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	service, ok := s.serviceMap[serviceName]
	return service, ok
}

func (s *serviceCache) getOrCreate(serviceName string) *cachedService {
	s.mu.Lock()
	defer s.mu.Unlock()
	service, ok := s.serviceMap[serviceName]
	if !ok {
		service = &cachedService{}
		s.serviceMap[serviceName] = service
	}
	return service
}

func (s *serviceCache) set(serviceName string, service *cachedService) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.serviceMap[serviceName] = service
}

func (s *serviceCache) delete(serviceName string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.serviceMap, serviceName)
}
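serviceCache above is a textbook mutex-per-operation map keyed by "namespace/name". The same pattern in isolation, as a runnable sketch; the type and field names here are illustrative, since the originals are unexported:

package main

import (
	"fmt"
	"sync"
)

type entry struct{ state string }

type keyedCache struct {
	mu sync.Mutex // protects m, exactly like serviceCache.mu
	m  map[string]*entry
}

// getOrCreate mirrors serviceCache.getOrCreate: first sight of a key
// inserts an empty entry so later retries have somewhere to hang state.
func (c *keyedCache) getOrCreate(key string) *entry {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.m[key]
	if !ok {
		e = &entry{}
		c.m[key] = e
	}
	return e
}

func (c *keyedCache) delete(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.m, key)
}

func main() {
	c := &keyedCache{m: make(map[string]*entry)}
	e := c.getOrCreate("default/my-svc")
	e.state = "last successfully processed spec"
	fmt.Println(c.getOrCreate("default/my-svc").state) // same entry back
	c.delete("default/my-svc")                         // dropped once the LB is confirmed gone
}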
func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
	if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) {
		return false
	}
	if wantsLoadBalancer(oldService) != wantsLoadBalancer(newService) {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "Type", "%v -> %v",
			oldService.Spec.Type, newService.Spec.Type)
		return true
	}

	if wantsLoadBalancer(newService) && !reflect.DeepEqual(oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges) {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "LoadBalancerSourceRanges", "%v -> %v",
			oldService.Spec.LoadBalancerSourceRanges, newService.Spec.LoadBalancerSourceRanges)
		return true
	}

	if !portsEqualForLB(oldService, newService) || oldService.Spec.SessionAffinity != newService.Spec.SessionAffinity {
		return true
	}
	if !loadBalancerIPsAreEqual(oldService, newService) {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "LoadbalancerIP", "%v -> %v",
			oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP)
		return true
	}
	if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Count: %v -> %v",
			len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs))
		return true
	}
	for i := range oldService.Spec.ExternalIPs {
		if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] {
			s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Added: %v",
				newService.Spec.ExternalIPs[i])
			return true
		}
	}
	if !reflect.DeepEqual(oldService.Annotations, newService.Annotations) {
		return true
	}
	if oldService.UID != newService.UID {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "UID", "%v -> %v",
			oldService.UID, newService.UID)
		return true
	}
	if oldService.Spec.ExternalTrafficPolicy != newService.Spec.ExternalTrafficPolicy {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalTrafficPolicy", "%v -> %v",
			oldService.Spec.ExternalTrafficPolicy, newService.Spec.ExternalTrafficPolicy)
		return true
	}
	if oldService.Spec.HealthCheckNodePort != newService.Spec.HealthCheckNodePort {
		s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "HealthCheckNodePort", "%v -> %v",
			oldService.Spec.HealthCheckNodePort, newService.Spec.HealthCheckNodePort)
		return true
	}

	return false
}

func (s *ServiceController) loadBalancerName(service *v1.Service) string {
	return cloudprovider.GetLoadBalancerName(service)
}

func getPortsForLB(service *v1.Service) ([]*v1.ServicePort, error) {
	var protocol v1.Protocol

	ports := []*v1.ServicePort{}
	for i := range service.Spec.Ports {
		sp := &service.Spec.Ports[i]
		// The check on protocol was removed here. The cloud provider itself is now responsible for all protocol validation
		ports = append(ports, sp)
		if protocol == "" {
			protocol = sp.Protocol
		} else if protocol != sp.Protocol && wantsLoadBalancer(service) {
			// TODO: Convert error messages to use event recorder
			return nil, fmt.Errorf("mixed protocol external load balancers are not supported")
		}
	}
	return ports, nil
}

func portsEqualForLB(x, y *v1.Service) bool {
	xPorts, err := getPortsForLB(x)
	if err != nil {
		return false
	}
	yPorts, err := getPortsForLB(y)
	if err != nil {
		return false
	}
	return portSlicesEqualForLB(xPorts, yPorts)
}

func portSlicesEqualForLB(x, y []*v1.ServicePort) bool {
	if len(x) != len(y) {
		return false
	}

	for i := range x {
		if !portEqualForLB(x[i], y[i]) {
			return false
		}
	}
	return true
}

func portEqualForLB(x, y *v1.ServicePort) bool {
	// TODO: Should we check name? (In theory, an LB could expose it)
	if x.Name != y.Name {
		return false
	}

	if x.Protocol != y.Protocol {
		return false
	}

	if x.Port != y.Port {
		return false
	}

	if x.NodePort != y.NodePort {
		return false
	}

	// We don't check TargetPort; that is not relevant for load balancing
	// TODO: Should we blank it out? Or just check it anyway?

	return true
}

func nodeNames(nodes []*v1.Node) []string {
	ret := make([]string, len(nodes))
	for i, node := range nodes {
		ret[i] = node.Name
	}
	return ret
}

func nodeSlicesEqualForLB(x, y []*v1.Node) bool {
	if len(x) != len(y) {
		return false
	}
	return stringSlicesEqual(nodeNames(x), nodeNames(y))
}

func stringSlicesEqual(x, y []string) bool {
	if len(x) != len(y) {
		return false
	}
	if !sort.StringsAreSorted(x) {
		sort.Strings(x)
	}
	if !sort.StringsAreSorted(y) {
		sort.Strings(y)
	}
	for i := range x {
		if x[i] != y[i] {
			return false
		}
	}
	return true
}

func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
	return func(node *v1.Node) bool {
		// We add the master to the node list, but it's unschedulable. So we use this to filter
		// the master.
		if node.Spec.Unschedulable {
			return false
		}

		// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
		// Recognize nodes labeled as master, and filter them also, as we were doing previously.
		if _, hasMasterRoleLabel := node.Labels[LabelNodeRoleMaster]; hasMasterRoleLabel {
			return false
		}

		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceNodeExclusion) {
			if _, hasExcludeBalancerLabel := node.Labels[LabelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
				return false
			}
		}

		// If we have no info, don't accept.
		if len(node.Status.Conditions) == 0 {
			return false
		}
		for _, cond := range node.Status.Conditions {
			// We consider the node for load balancing only when its NodeReady condition status
			// is ConditionTrue
			if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
				glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
				return false
			}
		}
		return true
	}
}
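getNodeConditionPredicate boils down to three exclusion rules: unschedulable nodes, nodes labeled as masters, and nodes whose NodeReady condition is not true (plus the feature-gated exclude-balancer label). A condensed standalone rendering of the same filter, with the feature gate omitted and the conditions check folded into a single ready flag:

package main

import "fmt"

type node struct {
	name          string
	unschedulable bool
	labels        map[string]string
	ready         bool // stands in for NodeReady == ConditionTrue
}

func eligible(n node) bool {
	if n.unschedulable { // masters used to be filtered this way
		return false
	}
	if _, isMaster := n.labels["node-role.kubernetes.io/master"]; isMaster {
		return false // as of 1.6 masters are labeled, not necessarily unschedulable
	}
	return n.ready
}

func main() {
	nodes := []node{
		{name: "worker-1", ready: true},
		{name: "master-1", labels: map[string]string{"node-role.kubernetes.io/master": ""}, ready: true},
		{name: "worker-2", ready: false},
	}
	for _, n := range nodes {
		fmt.Println(n.name, eligible(n)) // only worker-1 is load-balancer eligible
	}
}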
// nodeSyncLoop handles updating the hosts pointed to by all load
// balancers whenever the set of nodes in the cluster changes.
func (s *ServiceController) nodeSyncLoop() {
	newHosts, err := s.nodeLister.ListWithPredicate(getNodeConditionPredicate())
	if err != nil {
		glog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
		return
	}
	if nodeSlicesEqualForLB(newHosts, s.knownHosts) {
		// The set of nodes in the cluster hasn't changed, but we can retry
		// updating any services that we failed to update last time around.
		s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, newHosts)
		return
	}

	glog.Infof("Detected change in list of current cluster nodes. New node set: %v",
		nodeNames(newHosts))

	// Try updating all services, and save the ones that fail to try again next
	// round.
	s.servicesToUpdate = s.cache.allServices()
	numServices := len(s.servicesToUpdate)
	s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, newHosts)
	glog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes",
		numServices-len(s.servicesToUpdate), numServices)

	s.knownHosts = newHosts
}

// updateLoadBalancerHosts updates all existing load balancers so that
// they will match the list of hosts provided.
// Returns the list of services that couldn't be updated.
func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, hosts []*v1.Node) (servicesToRetry []*v1.Service) {
	for _, service := range services {
		func() {
			if service == nil {
				return
			}
			if err := s.lockedUpdateLoadBalancerHosts(service, hosts); err != nil {
				glog.Errorf("External error while updating load balancer: %v.", err)
				servicesToRetry = append(servicesToRetry, service)
			}
		}()
	}
	return servicesToRetry
}

// Updates the load balancer of a service, assuming we hold the mutex
// associated with the service.
func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, hosts []*v1.Node) error {
	if !wantsLoadBalancer(service) {
		return nil
	}

	// This operation doesn't normally take very long (and happens pretty often), so we only record the final event
	err := s.balancer.UpdateLoadBalancer(s.clusterName, service, hosts)
	if err == nil {
		// If there are no available nodes for the LoadBalancer service, make an EventTypeWarning event for it.
		if len(hosts) == 0 {
			s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name)
		} else {
			s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
		}
		return nil
	}

	// It's only an actual error if the load balancer still exists.
	if _, exists, err := s.balancer.GetLoadBalancer(s.clusterName, service); err != nil {
		glog.Errorf("External error while checking if load balancer %q exists: %v", cloudprovider.GetLoadBalancerName(service), err)
	} else if !exists {
		return nil
	}

	s.eventRecorder.Eventf(service, v1.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", nodeNames(hosts), err)
	return err
}

func wantsLoadBalancer(service *v1.Service) bool {
	return service.Spec.Type == v1.ServiceTypeLoadBalancer
}

func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool {
	return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP
}

// Computes the next retry, using exponential backoff.
// mutex must be held.
func (s *cachedService) nextRetryDelay() time.Duration {
	s.lastRetryDelay = s.lastRetryDelay * 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}

// Resets the retry exponential backoff. mutex must be held.
func (s *cachedService) resetRetryDelay() {
	s.lastRetryDelay = time.Duration(0)
}
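nextRetryDelay doubles the previous delay and clamps it to [minRetryDelay, maxRetryDelay], so consecutive failures for one service retry after 5s, 10s, 20s, 40s, 80s, 160s, and then 300s from there on. A standalone sketch that reproduces the sequence with the same constants:

package main

import (
	"fmt"
	"time"
)

const (
	minRetryDelay = 5 * time.Second
	maxRetryDelay = 300 * time.Second
)

// next mirrors cachedService.nextRetryDelay: double, then clamp.
func next(last time.Duration) time.Duration {
	d := last * 2
	if d < minRetryDelay {
		d = minRetryDelay
	}
	if d > maxRetryDelay {
		d = maxRetryDelay
	}
	return d
}

func main() {
	var d time.Duration // zero, as after resetRetryDelay
	for i := 0; i < 8; i++ {
		d = next(d)
		fmt.Println(d) // 5s 10s 20s 40s 1m20s 2m40s 5m0s 5m0s
	}
}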
// syncService will sync the Service with the given key. This function is not
// meant to be invoked concurrently with the same key.
func (s *ServiceController) syncService(key string) error {
	startTime := time.Now()
	var cachedService *cachedService
	var retryDelay time.Duration
	defer func() {
		glog.V(4).Infof("Finished syncing service %q (%v)", key, time.Now().Sub(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}

	// service holds the latest service info from apiserver
	service, err := s.serviceLister.Services(namespace).Get(name)
	switch {
	case errors.IsNotFound(err):
		// service absence in store means watcher caught the deletion, ensure LB info is cleaned
		glog.Infof("Service has been deleted %v", key)
		err, retryDelay = s.processServiceDeletion(key)
	case err != nil:
		glog.Infof("Unable to retrieve service %v from store: %v", key, err)
		s.workingQueue.Add(key)
		return err
	default:
		cachedService = s.cache.getOrCreate(key)
		err, retryDelay = s.processServiceUpdate(cachedService, service, key)
	}

	if retryDelay != 0 {
		// Add the failed service back to the queue so we'll retry it.
		glog.Errorf("Failed to process service %v. Retrying in %s: %v", key, retryDelay, err)
		go func(obj interface{}, delay time.Duration) {
			// Put the service key back into the working queue. More entries for
			// the service may have been added to the queue during the delay, but
			// that does no harm: when the retry is handled, the latest service
			// info is always fetched from the service store.
			s.workingQueue.AddAfter(obj, delay)
		}(key, retryDelay)
	} else if err != nil {
		runtime.HandleError(fmt.Errorf("failed to process service %v. Not retrying: %v", key, err))
	}
	return nil
}

// Returns an error if processing the service deletion failed, along with a time.Duration
// indicating whether processing should be retried; zero means no-retry; otherwise
// we should retry after that Duration.
func (s *ServiceController) processServiceDeletion(key string) (error, time.Duration) {
	cachedService, ok := s.cache.get(key)
	if !ok {
		return fmt.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key), doNotRetry
	}
	return s.processLoadBalancerDelete(cachedService, key)
}

func (s *ServiceController) processLoadBalancerDelete(cachedService *cachedService, key string) (error, time.Duration) {
	service := cachedService.state
	// delete load balancer info only if the service type is LoadBalancer
	if !wantsLoadBalancer(service) {
		return nil, doNotRetry
	}
	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
	err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service)
	if err != nil {
		message := "Error deleting load balancer (will retry): " + err.Error()
		s.eventRecorder.Event(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", message)
		return err, cachedService.nextRetryDelay()
	}
	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
	s.cache.delete(key)

	cachedService.resetRetryDelay()
	return nil, doNotRetry
}
vendor/k8s.io/kubernetes/pkg/controller/service/service_controller_test.go (generated, vendored, new file, 823 lines)
@@ -0,0 +1,823 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
const region = "us-central"
|
||||
|
||||
func newService(name string, uid types.UID, serviceType v1.ServiceType) *v1.Service {
|
||||
return &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: v1.ServiceSpec{Type: serviceType}}
|
||||
}
|
||||
|
||||
//Wrap newService so that you dont have to call default argumetns again and again.
|
||||
func defaultExternalService() *v1.Service {
|
||||
|
||||
return newService("external-balancer", types.UID("123"), v1.ServiceTypeLoadBalancer)
|
||||
|
||||
}
|
||||
|
||||
func alwaysReady() bool { return true }
|
||||
|
||||
func newController() (*ServiceController, *fakecloud.FakeCloud, *fake.Clientset) {
|
||||
cloud := &fakecloud.FakeCloud{}
|
||||
cloud.Region = region
|
||||
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
|
||||
serviceInformer := informerFactory.Core().V1().Services()
|
||||
nodeInformer := informerFactory.Core().V1().Nodes()
|
||||
|
||||
controller, _ := New(cloud, client, serviceInformer, nodeInformer, "test-cluster")
|
||||
controller.nodeListerSynced = alwaysReady
|
||||
controller.serviceListerSynced = alwaysReady
|
||||
controller.eventRecorder = record.NewFakeRecorder(100)
|
||||
|
||||
controller.init()
|
||||
cloud.Calls = nil // ignore any cloud calls made in init()
|
||||
client.ClearActions() // ignore any client calls made in init()
|
||||
|
||||
return controller, cloud, client
|
||||
}
|
||||
|
||||
func TestCreateExternalLoadBalancer(t *testing.T) {
|
||||
table := []struct {
|
||||
service *v1.Service
|
||||
expectErr bool
|
||||
expectCreateAttempt bool
|
||||
}{
|
||||
{
|
||||
service: &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "no-external-balancer",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeClusterIP,
|
||||
},
|
||||
},
|
||||
expectErr: false,
|
||||
expectCreateAttempt: false,
|
||||
},
|
||||
{
|
||||
service: &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "udp-service",
|
||||
Namespace: "default",
|
||||
SelfLink: testapi.Default.SelfLink("services", "udp-service"),
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: 80,
|
||||
Protocol: v1.ProtocolUDP,
|
||||
}},
|
||||
Type: v1.ServiceTypeLoadBalancer,
|
||||
},
|
||||
},
|
||||
expectErr: false,
|
||||
expectCreateAttempt: true,
|
||||
},
|
||||
{
|
||||
service: &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "basic-service1",
|
||||
Namespace: "default",
|
||||
SelfLink: testapi.Default.SelfLink("services", "basic-service1"),
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: 80,
|
||||
Protocol: v1.ProtocolTCP,
|
||||
}},
|
||||
Type: v1.ServiceTypeLoadBalancer,
|
||||
},
|
||||
},
|
||||
expectErr: false,
|
||||
expectCreateAttempt: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range table {
|
||||
controller, cloud, client := newController()
|
||||
err, _ := controller.createLoadBalancerIfNeeded("foo/bar", item.service)
|
||||
if !item.expectErr && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
} else if item.expectErr && err == nil {
|
||||
t.Errorf("expected error creating %v, got nil", item.service)
|
||||
}
|
||||
actions := client.Actions()
|
||||
if !item.expectCreateAttempt {
|
||||
if len(cloud.Calls) > 0 {
|
||||
t.Errorf("unexpected cloud provider calls: %v", cloud.Calls)
|
||||
}
|
||||
if len(actions) > 0 {
|
||||
t.Errorf("unexpected client actions: %v", actions)
|
||||
}
|
||||
} else {
|
||||
var balancer *fakecloud.FakeBalancer
|
||||
for k := range cloud.Balancers {
|
||||
if balancer == nil {
|
||||
b := cloud.Balancers[k]
|
||||
balancer = &b
|
||||
} else {
|
||||
t.Errorf("expected one load balancer to be created, got %v", cloud.Balancers)
|
||||
break
|
||||
}
|
||||
}
|
||||
if balancer == nil {
|
||||
t.Errorf("expected one load balancer to be created, got none")
|
||||
} else if balancer.Name != controller.loadBalancerName(item.service) ||
|
||||
balancer.Region != region ||
|
||||
balancer.Ports[0].Port != item.service.Spec.Ports[0].Port {
|
||||
t.Errorf("created load balancer has incorrect parameters: %v", balancer)
|
||||
}
|
||||
actionFound := false
|
||||
for _, action := range actions {
|
||||
if action.GetVerb() == "update" && action.GetResource().Resource == "services" {
|
||||
actionFound = true
|
||||
}
|
||||
}
|
||||
if !actionFound {
|
||||
t.Errorf("expected updated service to be sent to client, got these actions instead: %v", actions)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Finish converting and update comments
|
||||
func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
|
||||
nodes := []*v1.Node{
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "node0"}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "node1"}},
|
||||
{ObjectMeta: metav1.ObjectMeta{Name: "node73"}},
|
||||
}
|
||||
table := []struct {
|
||||
services []*v1.Service
|
||||
expectedUpdateCalls []fakecloud.FakeUpdateBalancerCall
|
||||
}{
|
||||
{
|
||||
// No services present: no calls should be made.
|
||||
services: []*v1.Service{},
|
||||
expectedUpdateCalls: nil,
|
||||
},
|
||||
{
|
||||
// Services do not have external load balancers: no calls should be made.
|
||||
services: []*v1.Service{
|
||||
newService("s0", "111", v1.ServiceTypeClusterIP),
|
||||
newService("s1", "222", v1.ServiceTypeNodePort),
|
||||
},
|
||||
expectedUpdateCalls: nil,
|
||||
},
|
||||
{
|
||||
// Services does have an external load balancer: one call should be made.
|
||||
services: []*v1.Service{
|
||||
newService("s0", "333", v1.ServiceTypeLoadBalancer),
|
||||
},
|
||||
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
|
||||
{Service: newService("s0", "333", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
},
|
||||
},
|
||||
{
|
||||
// Three services have an external load balancer: three calls.
|
||||
services: []*v1.Service{
|
||||
newService("s0", "444", v1.ServiceTypeLoadBalancer),
|
||||
newService("s1", "555", v1.ServiceTypeLoadBalancer),
|
||||
newService("s2", "666", v1.ServiceTypeLoadBalancer),
|
||||
},
|
||||
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
|
||||
{Service: newService("s0", "444", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
{Service: newService("s1", "555", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
{Service: newService("s2", "666", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
},
|
||||
},
|
||||
{
|
||||
// Two services have an external load balancer and two don't: two calls.
|
||||
services: []*v1.Service{
|
||||
newService("s0", "777", v1.ServiceTypeNodePort),
|
||||
newService("s1", "888", v1.ServiceTypeLoadBalancer),
|
||||
newService("s3", "999", v1.ServiceTypeLoadBalancer),
|
||||
newService("s4", "123", v1.ServiceTypeClusterIP),
|
||||
},
|
||||
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
|
||||
{Service: newService("s1", "888", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
{Service: newService("s3", "999", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
},
|
||||
},
|
||||
{
|
||||
// One service has an external load balancer and one is nil: one call.
|
||||
services: []*v1.Service{
|
||||
newService("s0", "234", v1.ServiceTypeLoadBalancer),
|
||||
nil,
|
||||
},
|
||||
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
|
||||
{Service: newService("s0", "234", v1.ServiceTypeLoadBalancer), Hosts: nodes},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, item := range table {
|
||||
controller, cloud, _ := newController()
|
||||
|
||||
var services []*v1.Service
|
||||
for _, service := range item.services {
|
||||
services = append(services, service)
|
||||
}
|
||||
if err := controller.updateLoadBalancerHosts(services, nodes); err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(item.expectedUpdateCalls, cloud.UpdateCalls) {
|
||||
t.Errorf("expected update calls mismatch, expected %+v, got %+v", item.expectedUpdateCalls, cloud.UpdateCalls)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetNodeConditionPredicate(t *testing.T) {
|
||||
tests := []struct {
|
||||
node v1.Node
|
||||
expectAccept bool
|
||||
name string
|
||||
}{
|
||||
{
|
||||
node: v1.Node{},
|
||||
expectAccept: false,
|
||||
name: "empty",
|
||||
},
|
||||
{
|
||||
node: v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
{Type: v1.NodeReady, Status: v1.ConditionTrue},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectAccept: true,
|
||||
name: "basic",
|
||||
},
|
||||
{
|
||||
node: v1.Node{
|
||||
Spec: v1.NodeSpec{Unschedulable: true},
|
||||
Status: v1.NodeStatus{
|
||||
Conditions: []v1.NodeCondition{
|
||||
{Type: v1.NodeReady, Status: v1.ConditionTrue},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectAccept: false,
|
||||
name: "unschedulable",
|
||||
},
|
||||
}
|
||||
pred := getNodeConditionPredicate()
|
||||
for _, test := range tests {
|
||||
accept := pred(&test.node)
|
||||
if accept != test.expectAccept {
|
||||
t.Errorf("Test failed for %s, expected %v, saw %v", test.name, test.expectAccept, accept)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(a-robinson): Add tests for update/sync/delete.
|
||||
|
||||
func TestProcessServiceUpdate(t *testing.T) {
|
||||
|
||||
var controller *ServiceController
|
||||
var cloud *fakecloud.FakeCloud
|
||||
|
||||
//A pair of old and new loadbalancer IP address
|
||||
oldLBIP := "192.168.1.1"
|
||||
newLBIP := "192.168.1.11"
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
key string
|
||||
updateFn func(*v1.Service) *v1.Service //Manipulate the structure
|
||||
svc *v1.Service
|
||||
expectedFn func(*v1.Service, error, time.Duration) error //Error comparision function
|
||||
}{
|
||||
{
|
||||
testName: "If updating a valid service",
|
||||
key: "validKey",
|
||||
svc: defaultExternalService(),
|
||||
updateFn: func(svc *v1.Service) *v1.Service {
|
||||
|
||||
controller, cloud, _ = newController()
|
||||
controller.cache.getOrCreate("validKey")
|
||||
return svc
|
||||
|
||||
},
|
||||
expectedFn: func(svc *v1.Service, err error, retryDuration time.Duration) error {
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if retryDuration != doNotRetry {
|
||||
return fmt.Errorf("retryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "If Updating Loadbalancer IP",
|
||||
key: "default/sync-test-name",
|
||||
svc: newService("sync-test-name", types.UID("sync-test-uid"), v1.ServiceTypeLoadBalancer),
|
||||
updateFn: func(svc *v1.Service) *v1.Service {
|
||||
|
||||
svc.Spec.LoadBalancerIP = oldLBIP
|
||||
|
||||
keyExpected := svc.GetObjectMeta().GetNamespace() + "/" + svc.GetObjectMeta().GetName()
|
||||
controller.enqueueService(svc)
|
||||
cachedServiceTest := controller.cache.getOrCreate(keyExpected)
|
||||
cachedServiceTest.state = svc
|
||||
controller.cache.set(keyExpected, cachedServiceTest)
|
||||
|
||||
keyGot, quit := controller.workingQueue.Get()
|
||||
if quit {
|
||||
t.Fatalf("get no workingQueue element")
|
||||
}
|
||||
if keyExpected != keyGot.(string) {
|
||||
t.Fatalf("get service key error, expected: %s, got: %s", keyExpected, keyGot.(string))
|
||||
}
|
||||
|
||||
newService := svc.DeepCopy()
|
||||
|
||||
newService.Spec.LoadBalancerIP = newLBIP
|
||||
return newService
|
||||
|
||||
},
|
||||
expectedFn: func(svc *v1.Service, err error, retryDuration time.Duration) error {
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if retryDuration != doNotRetry {
|
||||
return fmt.Errorf("retryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration)
|
||||
}
|
||||
|
||||
keyExpected := svc.GetObjectMeta().GetNamespace() + "/" + svc.GetObjectMeta().GetName()
|
||||
|
||||
cachedServiceGot, exist := controller.cache.get(keyExpected)
|
||||
if !exist {
|
||||
return fmt.Errorf("update service error, workingQueue should contain service: %s", keyExpected)
|
||||
}
|
||||
if cachedServiceGot.state.Spec.LoadBalancerIP != newLBIP {
|
||||
return fmt.Errorf("update LoadBalancerIP error, expected: %s, got: %s", newLBIP, cachedServiceGot.state.Spec.LoadBalancerIP)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
newSvc := tc.updateFn(tc.svc)
|
||||
svcCache := controller.cache.getOrCreate(tc.key)
|
||||
obtErr, retryDuration := controller.processServiceUpdate(svcCache, newSvc, tc.key)
|
||||
if err := tc.expectedFn(newSvc, obtErr, retryDuration); err != nil {
|
||||
t.Errorf("%v processServiceUpdate() %v", tc.testName, err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSyncService(t *testing.T) {
|
||||
|
||||
var controller *ServiceController
|
||||
var cloud *fakecloud.FakeCloud
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
key string
|
||||
updateFn func() //Function to manipulate the controller element to simulate error
|
||||
expectedFn func(error) error //Expected function if returns nil then test passed, failed otherwise
|
||||
}{
|
||||
{
|
||||
testName: "if an invalid service name is synced",
|
||||
key: "invalid/key/string",
|
||||
updateFn: func() {
|
||||
controller, cloud, _ = newController()
|
||||
|
||||
},
|
||||
expectedFn: func(e error) error {
|
||||
//TODO: Expected error is of the format fmt.Errorf("unexpected key format: %q", "invalid/key/string"),
|
||||
//TODO: should find a way to test for dependent package errors in such a way that it wont break
|
||||
//TODO: our tests, currently we only test if there is an error.
|
||||
//Error should be non-nil
|
||||
if e == nil {
|
||||
return fmt.Errorf("Expected=unexpected key format: %q, Obtained=nil", "invalid/key/string")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
/* We cannot open this test case as syncService(key) currently runtime.HandleError(err) and suppresses frequently occurring errors
|
||||
{
|
||||
testName: "if an invalid service is synced",
|
||||
key: "somethingelse",
|
||||
updateFn: func() {
|
||||
controller, cloud, _ = newController()
|
||||
srv := controller.cache.getOrCreate("external-balancer")
|
||||
srv.state = defaultExternalService()
|
||||
},
|
||||
expectedErr: fmt.Errorf("Service somethingelse not in cache even though the watcher thought it was. Ignoring the deletion."),
|
||||
},
|
||||
*/
|
||||
|
||||
//TODO: see if we can add a test for valid but error throwing service, its difficult right now because synCService() currently runtime.HandleError
|
||||
{
|
||||
testName: "if valid service",
|
||||
key: "external-balancer",
|
||||
updateFn: func() {
|
||||
testSvc := defaultExternalService()
|
||||
controller, cloud, _ = newController()
|
||||
controller.enqueueService(testSvc)
|
||||
svc := controller.cache.getOrCreate("external-balancer")
|
||||
svc.state = testSvc
|
||||
},
|
||||
expectedFn: func(e error) error {
|
||||
//error should be nil
|
||||
if e != nil {
|
||||
return fmt.Errorf("Expected=nil, Obtained=%v", e)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
||||
tc.updateFn()
|
||||
obtainedErr := controller.syncService(tc.key)
|
||||
|
||||
//expected matches obtained ??.
|
||||
if exp := tc.expectedFn(obtainedErr); exp != nil {
|
||||
t.Errorf("%v Error:%v", tc.testName, exp)
|
||||
}
|
||||
|
||||
//Post processing, the element should not be in the sync queue.
|
||||
_, exist := controller.cache.get(tc.key)
|
||||
if exist {
|
||||
t.Fatalf("%v working Queue should be empty, but contains %s", tc.testName, tc.key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessServiceDeletion(t *testing.T) {
|
||||
|
||||
var controller *ServiceController
|
||||
var cloud *fakecloud.FakeCloud
|
||||
//Add a global svcKey name
|
||||
svcKey := "external-balancer"
|
||||
|
||||
testCases := []struct {
|
||||
testName string
|
||||
updateFn func(*ServiceController) //Update function used to manupulate srv and controller values
|
||||
expectedFn func(svcErr error, retryDuration time.Duration) error //Function to check if the returned value is expected
|
||||
}{
|
||||
{
|
||||
testName: "If an non-existant service is deleted",
|
||||
updateFn: func(controller *ServiceController) {
|
||||
//Does not do anything
|
||||
},
|
||||
expectedFn: func(svcErr error, retryDuration time.Duration) error {
|
||||
|
||||
expectedError := "service external-balancer not in cache even though the watcher thought it was. Ignoring the deletion"
|
||||
if svcErr == nil || svcErr.Error() != expectedError {
|
||||
//cannot be nil or Wrong error message
|
||||
return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr)
|
||||
}
|
||||
|
||||
if retryDuration != doNotRetry {
|
||||
//Retry duration should match
|
||||
return fmt.Errorf("RetryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "If cloudprovided failed to delete the service",
|
||||
updateFn: func(controller *ServiceController) {
|
||||
|
||||
svc := controller.cache.getOrCreate(svcKey)
|
||||
svc.state = defaultExternalService()
|
||||
cloud.Err = fmt.Errorf("Error Deleting the Loadbalancer")
|
||||
|
||||
},
|
||||
expectedFn: func(svcErr error, retryDuration time.Duration) error {
|
||||
|
||||
expectedError := "Error Deleting the Loadbalancer"
|
||||
|
||||
if svcErr == nil || svcErr.Error() != expectedError {
|
||||
return fmt.Errorf("Expected=%v Obtained=%v", expectedError, svcErr)
|
||||
}
|
||||
|
||||
if retryDuration != minRetryDelay {
|
||||
return fmt.Errorf("RetryDuration Expected=%v Obtained=%v", minRetryDelay, retryDuration)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
		{
			testName: "If delete was successful",
			updateFn: func(controller *ServiceController) {

				testSvc := defaultExternalService()
				controller.enqueueService(testSvc)
				svc := controller.cache.getOrCreate(svcKey)
				svc.state = testSvc
				controller.cache.set(svcKey, svc)

			},
			expectedFn: func(svcErr error, retryDuration time.Duration) error {

				if svcErr != nil {
					return fmt.Errorf("Expected=nil Obtained=%v", svcErr)
				}

				if retryDuration != doNotRetry {
					//Retry duration should match
					return fmt.Errorf("RetryDuration Expected=%v Obtained=%v", doNotRetry, retryDuration)
				}

				//It should no longer be in the workqueue.
				_, exist := controller.cache.get(svcKey)
				if exist {
					return fmt.Errorf("delete service error, workingQueue should not contain service: %s any more", svcKey)
				}

				return nil
			},
		},
	}

	for _, tc := range testCases {
		//Create a new controller.
		controller, cloud, _ = newController()
		tc.updateFn(controller)
		obtainedErr, retryDuration := controller.processServiceDeletion(svcKey)
		if err := tc.expectedFn(obtainedErr, retryDuration); err != nil {
			t.Errorf("%v processServiceDeletion() %v", tc.testName, err)
		}
	}

}
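
// The cases above assert on the (error, retryDuration) pair that
// processServiceDeletion returns. A minimal sketch, not from this file, of
// how a caller could honor that contract, assuming a
// workqueue.DelayingInterface from k8s.io/client-go/util/workqueue is in
// scope:
func requeueOnError(queue workqueue.DelayingInterface, key string,
	process func(string) (error, time.Duration)) {
	if err, retry := process(key); err != nil && retry != doNotRetry {
		//Re-add the key after the delay the processor asked for.
		queue.AddAfter(key, retry)
	}
}
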
func TestDoesExternalLoadBalancerNeedsUpdate(t *testing.T) {

	var oldSvc, newSvc *v1.Service

	testCases := []struct {
		testName            string //Name of the test case
		updateFn            func() //Function to update the service object
		expectedNeedsUpdate bool   //needsUpdate always returns a bool
	}{
		{
			testName: "If the service type is changed from LoadBalancer to ClusterIP",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				newSvc.Spec.Type = v1.ServiceTypeClusterIP
			},
			expectedNeedsUpdate: true,
		},
		{
			testName: "If the Ports are different",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				oldSvc.Spec.Ports = []v1.ServicePort{
					{Port: 8000},
					{Port: 9000},
					{Port: 10000},
				}
				newSvc.Spec.Ports = []v1.ServicePort{
					{Port: 8001},
					{Port: 9001},
					{Port: 10001},
				}
			},
			expectedNeedsUpdate: true,
		},
		{
			testName: "If external IP counts are different",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				oldSvc.Spec.ExternalIPs = []string{"old.IP.1"}
				newSvc.Spec.ExternalIPs = []string{"new.IP.1", "new.IP.2"}
			},
			expectedNeedsUpdate: true,
		},
		{
			testName: "If external IPs are different",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				oldSvc.Spec.ExternalIPs = []string{"old.IP.1", "old.IP.2"}
				newSvc.Spec.ExternalIPs = []string{"new.IP.1", "new.IP.2"}
			},
			expectedNeedsUpdate: true,
		},
		{
			testName: "If UID is different",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				oldSvc.UID = types.UID("UID old")
				newSvc.UID = types.UID("UID new")
			},
			expectedNeedsUpdate: true,
		},
		{
			testName: "If ExternalTrafficPolicy is different",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				newSvc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			},
			expectedNeedsUpdate: true,
		},
		{
			testName: "If HealthCheckNodePort is different",
			updateFn: func() {
				oldSvc = defaultExternalService()
				newSvc = defaultExternalService()
				newSvc.Spec.HealthCheckNodePort = 30123
			},
			expectedNeedsUpdate: true,
		},
	}

	controller, _, _ := newController()
	for _, tc := range testCases {
		tc.updateFn()
		obtainedResult := controller.needsUpdate(oldSvc, newSvc)
		if obtainedResult != tc.expectedNeedsUpdate {
			t.Errorf("%v needsUpdate() should have returned %v but returned %v", tc.testName, tc.expectedNeedsUpdate, obtainedResult)
		}
	}
}
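
// A simplified sketch of the kind of field-by-field comparison the cases
// above exercise. This is an illustration only, not the controller's actual
// needsUpdate logic; it assumes the reflect package is imported.
func sketchNeedsUpdate(oldSvc, newSvc *v1.Service) bool {
	//Any change to these load-balancer-relevant fields should trigger an update.
	return oldSvc.Spec.Type != newSvc.Spec.Type ||
		oldSvc.UID != newSvc.UID ||
		oldSvc.Spec.ExternalTrafficPolicy != newSvc.Spec.ExternalTrafficPolicy ||
		oldSvc.Spec.HealthCheckNodePort != newSvc.Spec.HealthCheckNodePort ||
		!reflect.DeepEqual(oldSvc.Spec.Ports, newSvc.Spec.Ports) ||
		!reflect.DeepEqual(oldSvc.Spec.ExternalIPs, newSvc.Spec.ExternalIPs)
}
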
//All the test cases for serviceCache share a single cache, so the cases below
//must run in order: tc1 (Add) adds an element to the cache, and tc2 (Del)
//removes it without anything re-adding it automatically.
//Please keep this in mind while adding new test cases.
func TestServiceCache(t *testing.T) {

	//sc is the common service cache for all the test cases
	sc := &serviceCache{serviceMap: make(map[string]*cachedService)}

	testCases := []struct {
		testName     string
		setCacheFn   func()
		checkCacheFn func() error
	}{
		{
			testName: "Add",
			setCacheFn: func() {
				cS := sc.getOrCreate("addTest")
				cS.state = defaultExternalService()
			},
			checkCacheFn: func() error {
				//There must be exactly one element
				if len(sc.serviceMap) != 1 {
					return fmt.Errorf("Expected=1 Obtained=%d", len(sc.serviceMap))
				}
				return nil
			},
		},
		{
			testName: "Del",
			setCacheFn: func() {
				sc.delete("addTest")
			},
			checkCacheFn: func() error {
				//Now it should have no elements
				if len(sc.serviceMap) != 0 {
					return fmt.Errorf("Expected=0 Obtained=%d", len(sc.serviceMap))
				}
				return nil
			},
		},
		{
			testName: "Set and Get",
			setCacheFn: func() {
				sc.set("addTest", &cachedService{state: defaultExternalService()})
			},
			checkCacheFn: func() error {
				//Now it should have one element again
				cS, ok := sc.get("addTest")
				if !ok {
					return fmt.Errorf("is Available Expected=true Obtained=%v", ok)
				}
				if cS == nil {
					return fmt.Errorf("cachedService Expected=non-nil Obtained=nil")
				}
				return nil
			},
		},
		{
			testName: "ListKeys",
			setCacheFn: func() {
				//Add one more entry here
				sc.set("addTest1", &cachedService{state: defaultExternalService()})
			},
			checkCacheFn: func() error {
				//It should now have two elements
				keys := sc.ListKeys()
				if len(keys) != 2 {
					return fmt.Errorf("Elements Expected=2 Obtained=%v", len(keys))
				}
				return nil
			},
		},
		{
			testName: "GetByKey",
			setCacheFn: nil, //Nothing to set
			checkCacheFn: func() error {
				//The key added earlier should be retrievable
				svc, isKey, err := sc.GetByKey("addTest")
				if svc == nil || isKey == false || err != nil {
					return fmt.Errorf("Expected(non-nil, true, nil) Obtained(%v,%v,%v)", svc, isKey, err)
				}
				return nil
			},
		},
		{
			testName: "allServices",
			setCacheFn: nil, //Nothing to set
			checkCacheFn: func() error {
				//It should return two elements
				svcArray := sc.allServices()
				if len(svcArray) != 2 {
					return fmt.Errorf("Expected(2) Obtained(%v)", len(svcArray))
				}
				return nil
			},
		},
	}

	for _, tc := range testCases {
		if tc.setCacheFn != nil {
			tc.setCacheFn()
		}
		if err := tc.checkCacheFn(); err != nil {
			t.Errorf("%v returned %v", tc.testName, err)
		}
	}
}
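
// Because the cases above share one cache and are order-dependent, an
// order-independent variant could construct a fresh cache per case. A
// minimal sketch; newTestServiceCache is a hypothetical helper, not part of
// this file, reusing the same constructor expression as the test above.
func newTestServiceCache() *serviceCache {
	//Each call returns an empty, independent cache.
	return &serviceCache{serviceMap: make(map[string]*cachedService)}
}
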
//Test a utility function, as it's not easy to unit test nodeSyncLoop directly
func TestNodeSlicesEqualForLB(t *testing.T) {
	numNodes := 10
	nArray := make([]*v1.Node, numNodes)

	for i := 0; i < numNodes; i++ {
		nArray[i] = &v1.Node{}
		nArray[i].Name = "node1"
	}
	if !nodeSlicesEqualForLB(nArray, nArray) {
		t.Errorf("nodeSlicesEqualForLB() Expected=true Obtained=false")
	}
}
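
// A hedged follow-on check, assuming nodeSlicesEqualForLB treats slices of
// different lengths as unequal. This test is an illustrative addition, not
// part of the upstream file; it assumes the metav1 import is available.
func TestNodeSlicesNotEqualForLB(t *testing.T) {
	a := []*v1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}}
	var b []*v1.Node
	if nodeSlicesEqualForLB(a, b) {
		t.Errorf("nodeSlicesEqualForLB() Expected=false Obtained=true")
	}
}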