Vendor cleanup

Signed-off-by: Madhu Rajanna <mrajanna@redhat.com>
Madhu Rajanna
2019-01-16 18:11:54 +05:30
parent 661818bd79
commit 0f836c62fa
16816 changed files with 20 additions and 4611100 deletions

View File

@@ -1,7 +0,0 @@
approvers:
- sig-auth-authenticators-approvers
reviewers:
- sig-auth-authenticators-reviewers
labels:
- sig/auth

View File

@@ -1,69 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth_test
import (
"io/ioutil"
"os"
"reflect"
"testing"
clientauth "k8s.io/client-go/tools/auth"
)
func TestLoadFromFile(t *testing.T) {
loadAuthInfoTests := []struct {
authData string
authInfo *clientauth.Info
expectErr bool
}{
{
`{"user": "user", "password": "pass"}`,
&clientauth.Info{User: "user", Password: "pass"},
false,
},
{
"", nil, true,
},
{
"missing", nil, true,
},
}
for _, loadAuthInfoTest := range loadAuthInfoTests {
tt := loadAuthInfoTest
aifile, err := ioutil.TempFile("", "testAuthInfo")
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if tt.authData != "missing" {
defer os.Remove(aifile.Name())
defer aifile.Close()
_, err = aifile.WriteString(tt.authData)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
} else {
aifile.Close()
os.Remove(aifile.Name())
}
authInfo, err := clientauth.LoadFromFile(aifile.Name())
gotErr := err != nil
if gotErr != tt.expectErr {
t.Errorf("expected errorness: %v, actual errorness: %v", tt.expectErr, gotErr)
}
if !reflect.DeepEqual(authInfo, tt.authInfo) {
t.Errorf("Expected %v, got %v", tt.authInfo, authInfo)
}
}
}

View File

@@ -1,50 +0,0 @@
approvers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- caesarxuchao
- liggitt
- ncdc
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- brendandburns
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- erictune
- davidopp
- pmorie
- kargakis
- janetkuo
- justinsb
- eparis
- soltysh
- jsafrane
- dims
- madhusudancs
- hongchaodeng
- krousey
- markturansky
- fgrzadkowski
- xiang90
- mml
- ingvagabund
- resouer
- jessfraz
- david-mcmahon
- mfojtik
- '249043822'
- lixiaobing10051267
- ddysher
- mqliang
- feihujiang
- sdminonne
- ncdc

View File

@@ -1,394 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
)
// Config contains all the settings for a Controller.
type Config struct {
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Queue's Pop() method.
Queue
// Something that can list and watch your objects.
ListerWatcher
// Something that can process your objects.
Process ProcessFunc
// The type of your objects.
ObjectType runtime.Object
// Reprocess everything at least this often.
// Note that if it takes longer for you to clear the queue than this
// period, you will end up processing items in the order determined
// by FIFO.Replace(). Currently, this is random. If this is a
// problem, we can change that replacement policy to append new
// things to the end of the queue instead of replacing the entire
// queue.
FullResyncPeriod time.Duration
// ShouldResync, if specified, is invoked when the controller's reflector determines the next
// periodic sync should occur. If this returns true, it means the reflector should proceed with
// the resync.
ShouldResync ShouldResyncFunc
// If true, when Process() returns an error, re-enqueue the object.
// TODO: add interface to let you inject a delay/backoff or drop
// the object completely if desired. Pass the object in
// question to this interface as a parameter.
RetryOnError bool
}
// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
// resync or not. It can be used by a shared informer to support multiple event handlers with custom
// resync periods.
type ShouldResyncFunc func() bool
// ProcessFunc processes a single object.
type ProcessFunc func(obj interface{}) error
// Controller is a generic controller framework.
type controller struct {
config Config
reflector *Reflector
reflectorMutex sync.RWMutex
clock clock.Clock
}
type Controller interface {
Run(stopCh <-chan struct{})
HasSynced() bool
LastSyncResourceVersion() string
}
// New makes a new Controller from the given Config.
func New(c *Config) Controller {
ctlr := &controller{
config: *c,
clock: &clock.RealClock{},
}
return ctlr
}
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *controller) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
go func() {
<-stopCh
c.config.Queue.Close()
}()
r := NewReflector(
c.config.ListerWatcher,
c.config.ObjectType,
c.config.Queue,
c.config.FullResyncPeriod,
)
r.ShouldResync = c.config.ShouldResync
r.clock = c.clock
c.reflectorMutex.Lock()
c.reflector = r
c.reflectorMutex.Unlock()
var wg wait.Group
defer wg.Wait()
wg.StartWithChannel(stopCh, r.Run)
wait.Until(c.processLoop, time.Second, stopCh)
}
// HasSynced returns true once this controller has completed an initial resource listing.
func (c *controller) HasSynced() bool {
return c.config.Queue.HasSynced()
}
func (c *controller) LastSyncResourceVersion() string {
if c.reflector == nil {
return ""
}
return c.reflector.LastSyncResourceVersion()
}
// processLoop drains the work queue.
// TODO: Consider doing the processing in parallel. This will require a little thought
// to make sure that we don't end up processing the same object multiple times
// concurrently.
//
// TODO: Plumb through the stopCh here (and down to the queue) so that this can
// actually exit when the controller is stopped. Or just give up on this stuff
// ever being stoppable. Converting this whole package to use Context would
// also be helpful.
func (c *controller) processLoop() {
for {
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
if err != nil {
if err == FIFOClosedError {
return
}
if c.config.RetryOnError {
// This is the safe way to re-enqueue.
c.config.Queue.AddIfNotPresent(obj)
}
}
}
}
// ResourceEventHandler can handle notifications for events that happen to a
// resource. The events are informational only, so you can't return an
// error.
// * OnAdd is called when an object is added.
// * OnUpdate is called when an object is modified. Note that oldObj is the
// last known state of the object-- it is possible that several changes
// were combined together, so you can't use this to see every single
// change. OnUpdate is also called when a re-list happens, and it will
// get called even if nothing changed. This is useful for periodically
// evaluating or syncing something.
// * OnDelete will get the final state of the item if it is known, otherwise
// it will get an object of type DeletedFinalStateUnknown. This can
// happen if the watch is closed and misses the delete event and we don't
// notice the deletion until the subsequent re-list.
type ResourceEventHandler interface {
OnAdd(obj interface{})
OnUpdate(oldObj, newObj interface{})
OnDelete(obj interface{})
}
// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
// as few of the notification functions as you want while still implementing
// ResourceEventHandler.
type ResourceEventHandlerFuncs struct {
AddFunc func(obj interface{})
UpdateFunc func(oldObj, newObj interface{})
DeleteFunc func(obj interface{})
}
// OnAdd calls AddFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
if r.AddFunc != nil {
r.AddFunc(obj)
}
}
// OnUpdate calls UpdateFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
if r.UpdateFunc != nil {
r.UpdateFunc(oldObj, newObj)
}
}
// OnDelete calls DeleteFunc if it's not nil.
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
if r.DeleteFunc != nil {
r.DeleteFunc(obj)
}
}
// FilteringResourceEventHandler applies the provided filter to all events coming
// in, ensuring the appropriate nested handler method is invoked. An object
// that starts passing the filter after an update is considered an add, and an
// object that stops passing the filter after an update is considered a delete.
type FilteringResourceEventHandler struct {
FilterFunc func(obj interface{}) bool
Handler ResourceEventHandler
}
// OnAdd calls the nested handler only if the filter succeeds
func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
if !r.FilterFunc(obj) {
return
}
r.Handler.OnAdd(obj)
}
// OnUpdate ensures the proper handler is called depending on whether the filter matches
func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
newer := r.FilterFunc(newObj)
older := r.FilterFunc(oldObj)
switch {
case newer && older:
r.Handler.OnUpdate(oldObj, newObj)
case newer && !older:
r.Handler.OnAdd(newObj)
case !newer && older:
r.Handler.OnDelete(oldObj)
default:
// do nothing
}
}
// OnDelete calls the nested handler only if the filter succeeds
func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
if !r.FilterFunc(obj) {
return
}
r.Handler.OnDelete(obj)
}
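// Editorial sketch, not part of the original file: how the filter changes event
// routing. The map objects and the "watched" key are invented for illustration;
// any object type works, since the handlers receive interface{}.
func exampleFilteringHandler() []string {
var events []string
h := FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
return obj.(map[string]string)["watched"] == "true"
},
Handler: ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { events = append(events, "add") },
DeleteFunc: func(obj interface{}) { events = append(events, "delete") },
},
}
// The old object passed the filter and the new one does not, so the
// update is delivered to the nested handler as a delete of the old object.
h.OnUpdate(
map[string]string{"watched": "true"},
map[string]string{"watched": "false"},
)
return events // []string{"delete"}
}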
// DeletionHandlingMetaNamespaceKeyFunc checks for
// DeletedFinalStateUnknown objects before calling
// MetaNamespaceKeyFunc.
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
if d, ok := obj.(DeletedFinalStateUnknown); ok {
return d.Key, nil
}
return MetaNamespaceKeyFunc(obj)
}
// NewInformer returns a Store and a controller for populating the store
// while also providing event notifications. You should only use the returned
// Store for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
//
func NewInformer(
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
) (Store, Controller) {
// This will hold the client state, as we know it.
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
cfg := &Config{
Queue: fifo,
ListerWatcher: lw,
ObjectType: objType,
FullResyncPeriod: resyncPeriod,
RetryOnError: false,
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(Deltas) {
switch d.Type {
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
}
h.OnUpdate(old, d.Object)
} else {
if err := clientState.Add(d.Object); err != nil {
return err
}
h.OnAdd(d.Object)
}
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
h.OnDelete(d.Object)
}
}
return nil
},
}
return clientState, New(cfg)
}
// NewIndexerInformer returns an Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// Indexer for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
// * indexers is the indexer for the received object type.
//
func NewIndexerInformer(
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
indexers Indexers,
) (Indexer, Controller) {
// This will hold the client state, as we know it.
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
cfg := &Config{
Queue: fifo,
ListerWatcher: lw,
ObjectType: objType,
FullResyncPeriod: resyncPeriod,
RetryOnError: false,
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(Deltas) {
switch d.Type {
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
}
h.OnUpdate(old, d.Object)
} else {
if err := clientState.Add(d.Object); err != nil {
return err
}
h.OnAdd(d.Object)
}
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
h.OnDelete(d.Object)
}
}
return nil
},
}
return clientState, New(cfg)
}
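// Editorial sketch, not part of the original file: minimal NewIndexerInformer
// wiring that indexes objects by namespace. lw and objType are caller-supplied;
// NamespaceIndex and MetaNamespaceIndexFunc are assumed from this package's
// index.go, which is not shown in this diff.
func exampleIndexerInformer(lw ListerWatcher, objType runtime.Object) {
indexer, ctrl := NewIndexerInformer(
lw,
objType,
30*time.Second, // resync period; 0 disables periodic resyncs
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { /* react to adds */ },
},
Indexers{NamespaceIndex: MetaNamespaceIndexFunc},
)
stop := make(chan struct{})
defer close(stop)
go ctrl.Run(stop)
// Once ctrl.HasSynced() reports true, the indexer serves reads, e.g.:
objs, _ := indexer.ByIndex(NamespaceIndex, "default")
_ = objs
}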

View File

@@ -1,405 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
fcache "k8s.io/client-go/tools/cache/testing"
"github.com/google/gofuzz"
)
func Example() {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// This will hold the downstream state, as we know it.
downstream := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass downstream in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, downstream)
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)
cfg := &Config{
Queue: fifo,
ListerWatcher: source,
ObjectType: &v1.Pod{},
FullResyncPeriod: time.Millisecond * 100,
RetryOnError: false,
// Let's implement a simple controller that just deletes
// everything that comes in.
Process: func(obj interface{}) error {
// Obj is from the Pop method of the Queue we make above.
newest := obj.(Deltas).Newest()
if newest.Type != Deleted {
// Update our downstream store.
err := downstream.Add(newest.Object)
if err != nil {
return err
}
// Delete this object.
source.Delete(newest.Object.(runtime.Object))
} else {
// Update our downstream store.
err := downstream.Delete(newest.Object)
if err != nil {
return err
}
// fifo's KeyOf is easiest, because it handles
// DeletedFinalStateUnknown markers.
key, err := fifo.KeyOf(newest.Object)
if err != nil {
return err
}
// Report this deletion.
deletionCounter <- key
}
return nil
},
}
// Create the controller and run it until we close stop.
stop := make(chan struct{})
defer close(stop)
go New(cfg).Run(stop)
// Let's add a few objects to the source.
testIDs := []string{"a-hello", "b-controller", "c-framework"}
for _, name := range testIDs {
// Note that these pods are not valid-- the fake source doesn't
// call validation or anything.
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
}
// Let's wait for the controller to process the things we just added.
outputSet := sets.String{}
for i := 0; i < len(testIDs); i++ {
outputSet.Insert(<-deletionCounter)
}
for _, key := range outputSet.List() {
fmt.Println(key)
}
// Output:
// a-hello
// b-controller
// c-framework
}
func ExampleNewInformer() {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// Let's do threadsafe output to get predictable test results.
deletionCounter := make(chan string, 1000)
// Make a controller that immediately deletes anything added to it, and
// logs anything deleted.
_, controller := NewInformer(
source,
&v1.Pod{},
time.Millisecond*100,
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
source.Delete(obj.(runtime.Object))
},
DeleteFunc: func(obj interface{}) {
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
key = "oops something went wrong with the key"
}
// Report this deletion.
deletionCounter <- key
},
},
)
// Run the controller and run it until we close stop.
stop := make(chan struct{})
defer close(stop)
go controller.Run(stop)
// Let's add a few objects to the source.
testIDs := []string{"a-hello", "b-controller", "c-framework"}
for _, name := range testIDs {
// Note that these pods are not valid-- the fake source doesn't
// call validation or anything.
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: name}})
}
// Let's wait for the controller to process the things we just added.
outputSet := sets.String{}
for i := 0; i < len(testIDs); i++ {
outputSet.Insert(<-deletionCounter)
}
for _, key := range outputSet.List() {
fmt.Println(key)
}
// Output:
// a-hello
// b-controller
// c-framework
}
func TestHammerController(t *testing.T) {
// This test executes a bunch of requests through the fake source and
// controller framework to make sure there's no locking/threading
// errors. If an error happens, it should hang forever or trigger the
// race detector.
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// Let's do threadsafe output to get predictable test results.
outputSetLock := sync.Mutex{}
// map of key to operations done on the key
outputSet := map[string][]string{}
recordFunc := func(eventType string, obj interface{}) {
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
t.Errorf("something wrong with key: %v", err)
key = "oops something went wrong with the key"
}
// Record some output when items are deleted.
outputSetLock.Lock()
defer outputSetLock.Unlock()
outputSet[key] = append(outputSet[key], eventType)
}
// Make a controller which just logs all the changes it gets.
_, controller := NewInformer(
source,
&v1.Pod{},
time.Millisecond*100,
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { recordFunc("add", obj) },
UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
},
)
if controller.HasSynced() {
t.Errorf("Expected HasSynced() to return false before we started the controller")
}
// Run the controller and run it until we close stop.
stop := make(chan struct{})
go controller.Run(stop)
// Let's wait for the controller to do its initial sync
wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
return controller.HasSynced(), nil
})
if !controller.HasSynced() {
t.Errorf("Expected HasSynced() to return true after the initial sync")
}
wg := sync.WaitGroup{}
const threads = 3
wg.Add(threads)
for i := 0; i < threads; i++ {
go func() {
defer wg.Done()
// Let's add a few objects to the source.
currentNames := sets.String{}
rs := rand.NewSource(rand.Int63())
f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
r := rand.New(rs) // Mustn't use r and f concurrently!
for i := 0; i < 100; i++ {
var name string
var isNew bool
if currentNames.Len() == 0 || r.Intn(3) == 1 {
f.Fuzz(&name)
isNew = true
} else {
l := currentNames.List()
name = l[r.Intn(len(l))]
}
pod := &v1.Pod{}
f.Fuzz(pod)
pod.ObjectMeta.Name = name
pod.ObjectMeta.Namespace = "default"
// Add, update, or delete randomly.
// Note that these pods are not valid-- the fake source doesn't
// call validation or perform any other checking.
if isNew {
currentNames.Insert(name)
source.Add(pod)
continue
}
switch r.Intn(2) {
case 0:
currentNames.Insert(name)
source.Modify(pod)
case 1:
currentNames.Delete(name)
source.Delete(pod)
}
}
}()
}
wg.Wait()
// Let's wait for the controller to finish processing the things we just added.
// TODO: look in the queue to see how many items need to be processed.
time.Sleep(100 * time.Millisecond)
close(stop)
// TODO: Verify that no goroutines were leaked here and that everything shut
// down cleanly.
outputSetLock.Lock()
t.Logf("got: %#v", outputSet)
}
func TestUpdate(t *testing.T) {
// This test is going to exercise the various paths that result in a
// call to update.
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
const (
FROM = "from"
TO = "to"
)
// These are the transitions we expect to see; because this is
// asynchronous, there are a lot of valid possibilities.
type pair struct{ from, to string }
allowedTransitions := map[pair]bool{
{FROM, TO}: true,
// Because a resync can happen when we've already observed one
// of the above but before the item is deleted.
{TO, TO}: true,
// Because a resync could happen before we observe an update.
{FROM, FROM}: true,
}
pod := func(name, check string, final bool) *v1.Pod {
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"check": check},
},
}
if final {
p.Labels["final"] = "true"
}
return p
}
deletePod := func(p *v1.Pod) bool {
return p.Labels["final"] == "true"
}
tests := []func(string){
func(name string) {
name = "a-" + name
source.Add(pod(name, FROM, false))
source.Modify(pod(name, TO, true))
},
}
const threads = 3
var testDoneWG sync.WaitGroup
testDoneWG.Add(threads * len(tests))
// Make a controller that deletes things once it observes an update.
// It calls Done() on the wait group on deletions so we can tell when
// everything we've added has been deleted.
watchCh := make(chan struct{})
_, controller := NewInformer(
&testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
watch, err := source.Watch(options)
close(watchCh)
return watch, err
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return source.List(options)
},
},
&v1.Pod{},
0,
ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
o, n := oldObj.(*v1.Pod), newObj.(*v1.Pod)
from, to := o.Labels["check"], n.Labels["check"]
if !allowedTransitions[pair{from, to}] {
t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
}
if deletePod(n) {
source.Delete(n)
}
},
DeleteFunc: func(obj interface{}) {
testDoneWG.Done()
},
},
)
// Run the controller and run it until we close stop.
// Once Run() is called, calls to testDoneWG.Done() might start, so
// all testDoneWG.Add() calls must happen before this point
stop := make(chan struct{})
go controller.Run(stop)
<-watchCh
// run every test a few times, in parallel
var wg sync.WaitGroup
wg.Add(threads * len(tests))
for i := 0; i < threads; i++ {
for j, f := range tests {
go func(name string, f func(string)) {
defer wg.Done()
f(name)
}(fmt.Sprintf("%v-%v", i, j), f)
}
}
wg.Wait()
// Let's wait for the controller to process the things we just added.
testDoneWG.Wait()
close(stop)
}

View File

@@ -1,651 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
)
// NewDeltaFIFO returns a Store which can be used to process changes to items.
//
// keyFunc is used to figure out what key an object should have. (It's
// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
//
// 'knownObjects' is expected to return a list of keys that the consumer of
// this queue "knows about". It is used to decide which items are missing
// when Replace() is called; 'Deleted' deltas are produced for these items.
// It may be nil if you don't need to detect all deletions.
// TODO: consider merging keyLister with this object, tracking a list of
// "known" keys when Pop() is called. Have to think about how that
// affects error retrying.
// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
// lock, so it is safe to update data structures in it that need to be
// in sync with the queue (e.g. knownObjects).
//
// Example:
// In case of sharedIndexInformer being a consumer
// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
// storage. Currently these two methods are used for creating Lister
// and internal tests.
//
// Also see the comment on DeltaFIFO.
func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
f := &DeltaFIFO{
items: map[string]Deltas{},
queue: []string{},
keyFunc: keyFunc,
knownObjects: knownObjects,
}
f.cond.L = &f.lock
return f
}
// DeltaFIFO is like FIFO, but allows you to process deletes.
//
// DeltaFIFO is a producer-consumer queue, where a Reflector is
// intended to be the producer, and the consumer is whatever calls
// the Pop() method.
//
// DeltaFIFO solves this use case:
// * You want to process every object change (delta) at most once.
// * When you process an object, you want to see everything
// that's happened to it since you last processed it.
// * You want to process the deletion of objects.
// * You might want to periodically reprocess objects.
//
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
// interface{} to satisfy the Store/Queue interfaces, but they
// will always return an object of type Deltas.
//
// A note on threading: If you call Pop() in parallel from multiple
// threads, you could end up with multiple threads processing slightly
// different versions of the same object.
//
// A note on the KeyLister used by the DeltaFIFO: its main purpose is
// to list keys that are "known", for the purpose of figuring out which
// items have been deleted when Replace() or Delete() is called. The deleted
// object will be included in the DeletedFinalStateUnknown markers. These objects
// could be stale.
type DeltaFIFO struct {
// lock/cond protects access to 'items' and 'queue'.
lock sync.RWMutex
cond sync.Cond
// We depend on the property that items in the set are in
// the queue and vice versa, and that all Deltas in this
// map have at least one Delta.
items map[string]Deltas
queue []string
// populated is true if the first batch of items inserted by Replace() has been populated
// or Delete/Add/Update was called first.
populated bool
// initialPopulationCount is the number of items inserted by the first call of Replace()
initialPopulationCount int
// keyFunc is used to make the key used for queued item
// insertion and retrieval, and should be deterministic.
keyFunc KeyFunc
// knownObjects list keys that are "known", for the
// purpose of figuring out which items have been deleted
// when Replace() or Delete() is called.
knownObjects KeyListerGetter
// Indication the queue is closed.
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
// Currently, not used to gate any of CRED operations.
closed bool
closedLock sync.Mutex
}
var (
_ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
)
var (
// ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas
// object with zero length is encountered (should be impossible,
// but included for completeness).
ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key")
)
// Close the queue.
func (f *DeltaFIFO) Close() {
f.closedLock.Lock()
defer f.closedLock.Unlock()
f.closed = true
f.cond.Broadcast()
}
// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or
// DeletedFinalStateUnknown objects.
func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
if d, ok := obj.(Deltas); ok {
if len(d) == 0 {
return "", KeyError{obj, ErrZeroLengthDeltasObject}
}
obj = d.Newest().Object
}
if d, ok := obj.(DeletedFinalStateUnknown); ok {
return d.Key, nil
}
return f.keyFunc(obj)
}
// HasSynced returns true if Add/Update/Delete/AddIfNotPresent was called first,
// or the first batch of items inserted by Replace() has been popped.
func (f *DeltaFIFO) HasSynced() bool {
f.lock.Lock()
defer f.lock.Unlock()
return f.populated && f.initialPopulationCount == 0
}
// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *DeltaFIFO) Add(obj interface{}) error {
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
return f.queueActionLocked(Added, obj)
}
// Update is just like Add, but makes an Updated Delta.
func (f *DeltaFIFO) Update(obj interface{}) error {
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
return f.queueActionLocked(Updated, obj)
}
// Delete is just like Add, but makes a Deleted Delta. If the item does not
// already exist, it will be ignored. (It may have already been deleted by a
// Replace (re-list), for example.)
func (f *DeltaFIFO) Delete(obj interface{}) error {
id, err := f.KeyOf(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
if f.knownObjects == nil {
if _, exists := f.items[id]; !exists {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
return nil
}
} else {
// We only want to skip the "deletion" action if the object doesn't
// exist in knownObjects and it doesn't have corresponding item in items.
// Note that even if there is a "deletion" action in items, we can ignore it,
// because it will be deduped automatically in "queueActionLocked"
_, exists, err := f.knownObjects.GetByKey(id)
_, itemsExist := f.items[id]
if err == nil && !exists && !itemsExist {
// Presumably, this was deleted when a relist happened.
// Don't provide a second report of the same deletion.
return nil
}
}
return f.queueActionLocked(Deleted, obj)
}
// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
//
// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is
// different from the Add/Update/Delete functions.
func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
deltas, ok := obj.(Deltas)
if !ok {
return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
}
id, err := f.KeyOf(deltas.Newest().Object)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.addIfNotPresent(id, deltas)
return nil
}
// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller
// already holds the fifo lock.
func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) {
f.populated = true
if _, exists := f.items[id]; exists {
return
}
f.queue = append(f.queue, id)
f.items[id] = deltas
f.cond.Broadcast()
}
// re-listing and watching can deliver the same update multiple times in any
// order. This will combine the most recent two deltas if they are the same.
func dedupDeltas(deltas Deltas) Deltas {
n := len(deltas)
if n < 2 {
return deltas
}
a := &deltas[n-1]
b := &deltas[n-2]
if out := isDup(a, b); out != nil {
d := append(Deltas{}, deltas[:n-2]...)
return append(d, *out)
}
return deltas
}
// If a & b represent the same event, returns the delta that ought to be kept.
// Otherwise, returns nil.
// TODO: is there anything other than deletions that need deduping?
func isDup(a, b *Delta) *Delta {
if out := isDeletionDup(a, b); out != nil {
return out
}
// TODO: Detect other duplicate situations? Are there any?
return nil
}
// keep the one with the most information if both are deletions.
func isDeletionDup(a, b *Delta) *Delta {
if b.Type != Deleted || a.Type != Deleted {
return nil
}
// Do more sophisticated checks, or is this sufficient?
if _, ok := b.Object.(DeletedFinalStateUnknown); ok {
return a
}
return b
}
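// Editorial sketch, not part of the original file: two back-to-back deletions
// of the same object collapse into one delta, preferring the delta carrying the
// concrete object over a DeletedFinalStateUnknown marker. "obj" stands in for
// any object.
func exampleDedup() Deltas {
obj := "some-object"
in := Deltas{
{Deleted, obj}, // e.g. observed directly from the watch
{Deleted, DeletedFinalStateUnknown{Key: "ns/name", Obj: obj}}, // e.g. from a re-list
}
// isDeletionDup keeps the delta with the concrete object.
return dedupDeltas(in) // Deltas{{Deleted, obj}}
}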
// willObjectBeDeletedLocked returns true only if the last delta for the
// given object is Delete. Caller must lock first.
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
deltas := f.items[id]
return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
}
// queueActionLocked appends to the delta list for the object.
// Caller must lock first.
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
id, err := f.KeyOf(obj)
if err != nil {
return KeyError{obj, err}
}
// If object is supposed to be deleted (last event is Deleted),
// then we should ignore Sync events, because it would result in
// recreation of this object.
if actionType == Sync && f.willObjectBeDeletedLocked(id) {
return nil
}
newDeltas := append(f.items[id], Delta{actionType, obj})
newDeltas = dedupDeltas(newDeltas)
if len(newDeltas) > 0 {
if _, exists := f.items[id]; !exists {
f.queue = append(f.queue, id)
}
f.items[id] = newDeltas
f.cond.Broadcast()
} else {
// We need to remove this from our map (extra items in the queue are
// ignored if they are not in the map).
delete(f.items, id)
}
return nil
}
// List returns a list of all the items; it returns the object
// from the most recent Delta.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) List() []interface{} {
f.lock.RLock()
defer f.lock.RUnlock()
return f.listLocked()
}
func (f *DeltaFIFO) listLocked() []interface{} {
list := make([]interface{}, 0, len(f.items))
for _, item := range f.items {
list = append(list, item.Newest().Object)
}
return list
}
// ListKeys returns a list of all the keys of the objects currently
// in the FIFO.
func (f *DeltaFIFO) ListKeys() []string {
f.lock.RLock()
defer f.lock.RUnlock()
list := make([]string, 0, len(f.items))
for key := range f.items {
list = append(list, key)
}
return list
}
// Get returns the complete list of deltas for the requested item,
// or sets exists=false.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
key, err := f.KeyOf(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
return f.GetByKey(key)
}
// GetByKey returns the complete list of deltas for the requested item,
// setting exists=false if that list is empty.
// You should treat the items returned inside the deltas as immutable.
func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
f.lock.RLock()
defer f.lock.RUnlock()
d, exists := f.items[key]
if exists {
// Copy item's slice so operations on this slice
// won't interfere with the object we return.
d = copyDeltas(d)
}
return d, exists, nil
}
// IsClosed checks if the queue is closed.
func (f *DeltaFIFO) IsClosed() bool {
f.closedLock.Lock()
defer f.closedLock.Unlock()
return f.closed
}
// Pop blocks until an item is added to the queue, and then returns it. If
// multiple items are ready, they are returned in the order in which they were
// added/updated. The item is removed from the queue (and the store) before it
// is returned, so if you don't successfully process it, you need to add it back
// with AddIfNotPresent().
// process function is called under lock, so it is safe to update data structures
// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
// may return an instance of ErrRequeue with a nested error to indicate the current
// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
//
// Pop returns a 'Deltas', which has a complete list of all the things
// that happened to the object (deltas) while it was sitting in the queue.
func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
f.lock.Lock()
defer f.lock.Unlock()
for {
for len(f.queue) == 0 {
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
// When Close() is called, the f.closed is set and the condition is broadcasted.
// Which causes this loop to continue and return from the Pop().
if f.IsClosed() {
return nil, FIFOClosedError
}
f.cond.Wait()
}
id := f.queue[0]
f.queue = f.queue[1:]
if f.initialPopulationCount > 0 {
f.initialPopulationCount--
}
item, ok := f.items[id]
if !ok {
// Item may have been deleted subsequently.
continue
}
delete(f.items, id)
err := process(item)
if e, ok := err.(ErrRequeue); ok {
f.addIfNotPresent(id, item)
err = e.Err
}
// Don't need to copyDeltas here, because we're transferring
// ownership to the caller.
return item, err
}
}
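// Editorial sketch, not part of the original file: a typical consumer loop.
// handle is a caller-supplied function; returning ErrRequeue from the process
// callback re-adds the item under the lock, exactly as documented above.
func exampleDrain(f *DeltaFIFO, handle func(Deltas) error) {
for {
_, err := f.Pop(func(obj interface{}) error {
if err := handle(obj.(Deltas)); err != nil {
return ErrRequeue{Err: err} // keep the item around for a later retry
}
return nil
})
if err == FIFOClosedError {
return // Close() was called and the queue has been drained
}
}
}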
// Replace will delete the contents of 'f', using instead the given map.
// 'f' takes ownership of the map, you should not reference the map again
// after calling this function. f's queue is reset, too; upon return, it
// will contain the items in the map, in no particular order.
func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
f.lock.Lock()
defer f.lock.Unlock()
keys := make(sets.String, len(list))
for _, item := range list {
key, err := f.KeyOf(item)
if err != nil {
return KeyError{item, err}
}
keys.Insert(key)
if err := f.queueActionLocked(Sync, item); err != nil {
return fmt.Errorf("couldn't enqueue object: %v", err)
}
}
if f.knownObjects == nil {
// Do deletion detection against our own list.
for k, oldItem := range f.items {
if keys.Has(k) {
continue
}
var deletedObj interface{}
if n := oldItem.Newest(); n != nil {
deletedObj = n.Object
}
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
return err
}
}
if !f.populated {
f.populated = true
f.initialPopulationCount = len(list)
}
return nil
}
// Detect deletions not already in the queue.
knownKeys := f.knownObjects.ListKeys()
queuedDeletions := 0
for _, k := range knownKeys {
if keys.Has(k) {
continue
}
deletedObj, exists, err := f.knownObjects.GetByKey(k)
if err != nil {
deletedObj = nil
klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
} else if !exists {
deletedObj = nil
klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
}
queuedDeletions++
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
return err
}
}
if !f.populated {
f.populated = true
f.initialPopulationCount = len(list) + queuedDeletions
}
return nil
}
// Resync will send a sync event for each item
func (f *DeltaFIFO) Resync() error {
f.lock.Lock()
defer f.lock.Unlock()
if f.knownObjects == nil {
return nil
}
keys := f.knownObjects.ListKeys()
for _, k := range keys {
if err := f.syncKeyLocked(k); err != nil {
return err
}
}
return nil
}
func (f *DeltaFIFO) syncKey(key string) error {
f.lock.Lock()
defer f.lock.Unlock()
return f.syncKeyLocked(key)
}
func (f *DeltaFIFO) syncKeyLocked(key string) error {
obj, exists, err := f.knownObjects.GetByKey(key)
if err != nil {
klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
return nil
} else if !exists {
klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
return nil
}
// If we are doing Resync() and there is already an event queued for that object,
// we ignore the Resync for it. This is to avoid the race, in which the resync
// comes with the previous value of object (since queueing an event for the object
// doesn't trigger changing the underlying store <knownObjects>).
id, err := f.KeyOf(obj)
if err != nil {
return KeyError{obj, err}
}
if len(f.items[id]) > 0 {
return nil
}
if err := f.queueActionLocked(Sync, obj); err != nil {
return fmt.Errorf("couldn't queue object: %v", err)
}
return nil
}
// A KeyListerGetter is anything that knows how to list its keys and look up by key.
type KeyListerGetter interface {
KeyLister
KeyGetter
}
// A KeyLister is anything that knows how to list its keys.
type KeyLister interface {
ListKeys() []string
}
// A KeyGetter is anything that knows how to get the value stored under a given key.
type KeyGetter interface {
GetByKey(key string) (interface{}, bool, error)
}
// DeltaType is the type of a change (addition, deletion, etc)
type DeltaType string
const (
Added DeltaType = "Added"
Updated DeltaType = "Updated"
Deleted DeltaType = "Deleted"
// The other types are obvious. You'll get Sync deltas when:
// * A watch expires/errors out and a new list/watch cycle is started.
// * You've turned on periodic syncs.
// (Anything that triggers DeltaFIFO's Replace() method.)
Sync DeltaType = "Sync"
)
// Delta is the type stored by a DeltaFIFO. It tells you what change
// happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
// state of the object before it was deleted.
type Delta struct {
Type DeltaType
Object interface{}
}
// Deltas is a list of one or more 'Delta's to an individual object.
// The oldest delta is at index 0, the newest delta is the last one.
type Deltas []Delta
// Oldest is a convenience function that returns the oldest delta, or
// nil if there are no deltas.
func (d Deltas) Oldest() *Delta {
if len(d) > 0 {
return &d[0]
}
return nil
}
// Newest is a convenience function that returns the newest delta, or
// nil if there are no deltas.
func (d Deltas) Newest() *Delta {
if n := len(d); n > 0 {
return &d[n-1]
}
return nil
}
// copyDeltas returns a shallow copy of d; that is, it copies the slice but not
// the objects in the slice. This allows Get/List to return an object that we
// know won't be clobbered by subsequent modifications.
func copyDeltas(d Deltas) Deltas {
d2 := make(Deltas, len(d))
copy(d2, d)
return d2
}
// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
// an object was deleted but the watch deletion event was missed. In this
// case we don't know the final "resting" state of the object, so there's
// a chance the included `Obj` is stale.
type DeletedFinalStateUnknown struct {
Key string
Obj interface{}
}
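// Editorial sketch, not part of the original file: the usual unwrapping step in
// a DeleteFunc. Because a missed watch event is reported through this marker,
// the object handed to OnDelete may be a DeletedFinalStateUnknown rather than
// the resource itself, and its Obj may be stale or even nil.
func exampleUnwrapTombstone(obj interface{}) interface{} {
if tombstone, ok := obj.(DeletedFinalStateUnknown); ok {
return tombstone.Obj // last known state, possibly stale or nil
}
return obj
}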

View File

@@ -1,492 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"reflect"
"testing"
"time"
)
// helper function to reduce stuttering
func testPop(f *DeltaFIFO) testFifoObject {
return Pop(f).(Deltas).Newest().Object.(testFifoObject)
}
// keyLookupFunc adapts a raw function to be a KeyListerGetter.
type keyLookupFunc func() []testFifoObject
// ListKeys just calls kl.
func (kl keyLookupFunc) ListKeys() []string {
result := []string{}
for _, fifoObj := range kl() {
result = append(result, fifoObj.name)
}
return result
}
// GetByKey returns the object if its key is in the list returned by kl.
func (kl keyLookupFunc) GetByKey(key string) (interface{}, bool, error) {
for _, v := range kl() {
if v.name == key {
return v, true, nil
}
}
return nil, false, nil
}
func TestDeltaFIFO_basic(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
const amount = 500
go func() {
for i := 0; i < amount; i++ {
f.Add(mkFifoObj(string([]rune{'a', rune(i)}), i+1))
}
}()
go func() {
for u := uint64(0); u < amount; u++ {
f.Add(mkFifoObj(string([]rune{'b', rune(u)}), u+1))
}
}()
lastInt := int(0)
lastUint := uint64(0)
for i := 0; i < amount*2; i++ {
switch obj := testPop(f).val.(type) {
case int:
if obj <= lastInt {
t.Errorf("got %v (int) out of order, last was %v", obj, lastInt)
}
lastInt = obj
case uint64:
if obj <= lastUint {
t.Errorf("got %v (uint) out of order, last was %v", obj, lastUint)
} else {
lastUint = obj
}
default:
t.Fatalf("unexpected type %#v", obj)
}
}
}
func TestDeltaFIFO_requeueOnPop(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
_, err := f.Pop(func(obj interface{}) error {
if obj.(Deltas)[0].Object.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: nil}
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(Deltas)[0].Object.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: fmt.Errorf("test error")}
})
if err == nil || err.Error() != "test error" {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(Deltas)[0].Object.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); ok || err != nil {
t.Fatalf("object should have been removed: %t %v", ok, err)
}
}
func TestDeltaFIFO_addUpdate(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("foo", 12))
f.Delete(mkFifoObj("foo", 15))
if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
got := make(chan testFifoObject, 2)
go func() {
for {
obj := testPop(f)
t.Logf("got a thing %#v", obj)
t.Logf("D len: %v", len(f.queue))
got <- obj
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestDeltaFIFO_enqueueingNoLister(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("bar", 15))
f.Add(mkFifoObj("qux", 17))
f.Delete(mkFifoObj("qux", 18))
// This delete does not enqueue anything because baz doesn't exist.
f.Delete(mkFifoObj("baz", 20))
expectList := []int{10, 15, 18}
for _, expect := range expectList {
if e, a := expect, testPop(f).val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
}
if e, a := 0, len(f.items); e != a {
t.Errorf("queue unexpectedly not empty: %v != %v\n%#v", e, a, f.items)
}
}
func TestDeltaFIFO_enqueueingWithLister(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("bar", 15))
// This delete does enqueue the deletion, because "baz" is in the key lister.
f.Delete(mkFifoObj("baz", 20))
expectList := []int{10, 15, 20}
for _, expect := range expectList {
if e, a := expect, testPop(f).val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
}
if e, a := 0, len(f.items); e != a {
t.Errorf("queue unexpectedly not empty: %v != %v", e, a)
}
}
func TestDeltaFIFO_addReplace(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Replace([]interface{}{mkFifoObj("foo", 15)}, "0")
got := make(chan testFifoObject, 2)
go func() {
for {
got <- testPop(f)
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestDeltaFIFO_ResyncNonExisting(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5)}
}),
)
f.Delete(mkFifoObj("foo", 10))
f.Resync()
deltas := f.items["foo"]
if len(deltas) != 1 {
t.Fatalf("unexpected deltas length: %v", deltas)
}
if deltas[0].Type != Deleted {
t.Errorf("unexpected delta: %v", deltas[0])
}
}
func TestDeltaFIFO_DeleteExistingNonPropagated(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{}
}),
)
f.Add(mkFifoObj("foo", 5))
f.Delete(mkFifoObj("foo", 6))
deltas := f.items["foo"]
if len(deltas) != 2 {
t.Fatalf("unexpected deltas length: %v", deltas)
}
if deltas[len(deltas)-1].Type != Deleted {
t.Errorf("unexpected delta: %v", deltas[len(deltas)-1])
}
}
func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
)
f.Delete(mkFifoObj("baz", 10))
f.Replace([]interface{}{mkFifoObj("foo", 5)}, "0")
expectedList := []Deltas{
{{Deleted, mkFifoObj("baz", 10)}},
{{Sync, mkFifoObj("foo", 5)}},
// Since "bar" didn't have a delete event and wasn't in the Replace list
// it should get a tombstone key with the right Obj.
{{Deleted, DeletedFinalStateUnknown{Key: "bar", Obj: mkFifoObj("bar", 6)}}},
}
for _, expected := range expectedList {
cur := Pop(f).(Deltas)
if e, a := expected, cur; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
}
}
func TestDeltaFIFO_UpdateResyncRace(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5)}
}),
)
f.Update(mkFifoObj("foo", 6))
f.Resync()
expectedList := []Deltas{
{{Updated, mkFifoObj("foo", 6)}},
}
for _, expected := range expectedList {
cur := Pop(f).(Deltas)
if e, a := expected, cur; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
}
}
func TestDeltaFIFO_HasSyncedCorrectOnDeletion(t *testing.T) {
f := NewDeltaFIFO(
testFifoObjectKeyFunc,
keyLookupFunc(func() []testFifoObject {
return []testFifoObject{mkFifoObj("foo", 5), mkFifoObj("bar", 6), mkFifoObj("baz", 7)}
}),
)
f.Replace([]interface{}{mkFifoObj("foo", 5)}, "0")
expectedList := []Deltas{
{{Sync, mkFifoObj("foo", 5)}},
// Since "bar" didn't have a delete event and wasn't in the Replace list
// it should get a tombstone key with the right Obj.
{{Deleted, DeletedFinalStateUnknown{Key: "bar", Obj: mkFifoObj("bar", 6)}}},
}
for _, expected := range expectedList {
if f.HasSynced() {
t.Errorf("Expected HasSynced to be false")
}
cur := Pop(f).(Deltas)
if e, a := expected, cur; !reflect.DeepEqual(e, a) {
t.Errorf("Expected %#v, got %#v", e, a)
}
}
if f.HasSynced() {
t.Errorf("Expected HasSynced to be true")
}
}
func TestDeltaFIFO_detectLineJumpers(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("foo", 10))
f.Add(mkFifoObj("bar", 1))
f.Add(mkFifoObj("foo", 11))
f.Add(mkFifoObj("foo", 13))
f.Add(mkFifoObj("zab", 30))
if e, a := 13, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
f.Add(mkFifoObj("foo", 14)) // ensure foo doesn't jump back in line
if e, a := 1, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 30, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 14, testPop(f).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
func TestDeltaFIFO_addIfNotPresent(t *testing.T) {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
f.Add(mkFifoObj("b", 3))
b3 := Pop(f)
f.Add(mkFifoObj("c", 4))
c4 := Pop(f)
if e, a := 0, len(f.items); e != a {
t.Fatalf("Expected %v, got %v items in queue", e, a)
}
f.Add(mkFifoObj("a", 1))
f.Add(mkFifoObj("b", 2))
f.AddIfNotPresent(b3)
f.AddIfNotPresent(c4)
if e, a := 3, len(f.items); a != e {
t.Fatalf("expected queue length %d, got %d", e, a)
}
expectedValues := []int{1, 2, 4}
for _, expected := range expectedValues {
if actual := testPop(f).val; actual != expected {
t.Fatalf("expected value %d, got %d", expected, actual)
}
}
}
func TestDeltaFIFO_KeyOf(t *testing.T) {
f := DeltaFIFO{keyFunc: testFifoObjectKeyFunc}
table := []struct {
obj interface{}
key string
}{
{obj: testFifoObject{name: "A"}, key: "A"},
{obj: DeletedFinalStateUnknown{Key: "B", Obj: nil}, key: "B"},
{obj: Deltas{{Object: testFifoObject{name: "C"}}}, key: "C"},
{obj: Deltas{{Object: DeletedFinalStateUnknown{Key: "D", Obj: nil}}}, key: "D"},
}
for _, item := range table {
got, err := f.KeyOf(item.obj)
if err != nil {
t.Errorf("Unexpected error for %q: %v", item.obj, err)
continue
}
if e, a := item.key, got; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
}
func TestDeltaFIFO_HasSynced(t *testing.T) {
tests := []struct {
actions []func(f *DeltaFIFO)
expectedSynced bool
}{
{
actions: []func(f *DeltaFIFO){},
expectedSynced: false,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Add(mkFifoObj("a", 1)) },
},
expectedSynced: true,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{}, "0") },
},
expectedSynced: true,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
},
expectedSynced: false,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *DeltaFIFO) { Pop(f) },
},
expectedSynced: false,
},
{
actions: []func(f *DeltaFIFO){
func(f *DeltaFIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *DeltaFIFO) { Pop(f) },
func(f *DeltaFIFO) { Pop(f) },
},
expectedSynced: true,
},
}
for i, test := range tests {
f := NewDeltaFIFO(testFifoObjectKeyFunc, nil)
for _, action := range test.actions {
action(f)
}
if e, a := test.expectedSynced, f.HasSynced(); a != e {
t.Errorf("test case %v failed, expected: %v , got %v", i, e, a)
}
}
}

View File

@@ -1,24 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cache is a client-side caching mechanism. It is useful for
// reducing the number of server calls you'd otherwise need to make.
// Reflector watches a server and updates a Store. Two stores are provided;
// one that simply caches objects (for example, to allow a scheduler to
// list currently available nodes), and one that additionally acts as
// a FIFO queue (for example, to allow a scheduler to process incoming
// pods).
package cache // import "k8s.io/client-go/tools/cache"
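For orientation, here is a minimal sketch (not part of the vendored sources) of the two store flavors the comment above describes; NewStore, NewFIFO, and MetaNamespaceKeyFunc are this package's stock helpers, and the empty-store printout is purely illustrative.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// A plain Store keeps only the latest version of each object.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	// A FIFO additionally queues objects so a consumer can process each one.
	fifo := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	fmt.Println(len(store.ListKeys()), len(fifo.ListKeys())) // 0 0
}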

View File

@ -1,208 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/klog"
)
// ExpirationCache implements the store interface
// 1. All entries are automatically time stamped on insert
// a. The key is computed based on the original item/keyFunc
// b. The value inserted under that key is the timestamped item
// 2. Expiration happens lazily on read based on the expiration policy
// a. No item can be inserted into the store while we're expiring
// *any* item in the cache.
// 3. Time-stamps are stripped off unexpired entries before return
// Note that the ExpirationCache is inherently slower than a normal
// threadSafeStore because it takes a write lock every time it checks if
// an item has expired.
type ExpirationCache struct {
cacheStorage ThreadSafeStore
keyFunc KeyFunc
clock clock.Clock
expirationPolicy ExpirationPolicy
// expirationLock is a write lock used to guarantee that we don't clobber
// newly inserted objects because of a stale expiration timestamp comparison
expirationLock sync.Mutex
}
// ExpirationPolicy dictates when an object expires. Currently only abstracted out
// so unittests don't rely on the system clock.
type ExpirationPolicy interface {
IsExpired(obj *timestampedEntry) bool
}
// TTLPolicy implements a ttl based ExpirationPolicy.
type TTLPolicy struct {
// >0: Expire entries with an age > ttl
// <=0: Don't expire any entry
Ttl time.Duration
// Clock used to calculate ttl expiration
Clock clock.Clock
}
// IsExpired returns true if the policy has a positive TTL and the given
// object's age exceeds it.
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
}
// timestampedEntry is the only type allowed in an ExpirationCache.
type timestampedEntry struct {
obj interface{}
timestamp time.Time
}
// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
item, _ := c.cacheStorage.Get(key)
if tsEntry, ok := item.(*timestampedEntry); ok {
return tsEntry, true
}
return nil, false
}
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// already expired. It holds a write lock across deletion.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
// Prevent all inserts from the time we deem an item as "expired" to when we
// delete it, so an un-expired item doesn't sneak in under the same key, just
// before the Delete.
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
timestampedItem, exists := c.getTimestampedEntry(key)
if !exists {
return nil, false
}
if c.expirationPolicy.IsExpired(timestampedItem) {
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
c.cacheStorage.Delete(key)
return nil, false
}
return timestampedItem.obj, true
}
// GetByKey returns the item stored under the key, or sets exists=false.
func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) {
obj, exists := c.getOrExpire(key)
return obj, exists, nil
}
// Get returns unexpired items. It purges the cache of expired items in the
// process.
func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) {
key, err := c.keyFunc(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
obj, exists := c.getOrExpire(key)
return obj, exists, nil
}
// List retrieves a list of unexpired items. It purges the cache of expired
// items in the process.
func (c *ExpirationCache) List() []interface{} {
items := c.cacheStorage.List()
list := make([]interface{}, 0, len(items))
for _, item := range items {
obj := item.(*timestampedEntry).obj
if key, err := c.keyFunc(obj); err != nil {
list = append(list, obj)
} else if obj, exists := c.getOrExpire(key); exists {
list = append(list, obj)
}
}
return list
}
// ListKeys returns a list of all keys in the expiration cache.
func (c *ExpirationCache) ListKeys() []string {
return c.cacheStorage.ListKeys()
}
// Add timestamps an item and inserts it into the cache, overwriting entries
// that might exist under the same key.
func (c *ExpirationCache) Add(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
return nil
}
// Update has not been implemented yet for lack of a use case, so this method
// simply calls `Add`. This effectively refreshes the timestamp.
func (c *ExpirationCache) Update(obj interface{}) error {
return c.Add(obj)
}
// Delete removes an item from the cache.
func (c *ExpirationCache) Delete(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Delete(key)
return nil
}
// Replace will convert all items in the given list to timestampedEntries
// before attempting the replace operation. The replace operation deletes
// the prior contents of the ExpirationCache `c`.
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
items := make(map[string]interface{}, len(list))
ts := c.clock.Now()
for _, item := range list {
key, err := c.keyFunc(item)
if err != nil {
return KeyError{item, err}
}
items[key] = &timestampedEntry{item, ts}
}
c.cacheStorage.Replace(items, resourceVersion)
return nil
}
// Resync will touch all objects to put them into the processing queue
func (c *ExpirationCache) Resync() error {
return c.cacheStorage.Resync()
}
// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
return &ExpirationCache{
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
keyFunc: keyFunc,
clock: clock.RealClock{},
expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
}
}
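As a usage sketch, here is how a caller might exercise NewTTLStore; the event type, the key function, and the 50ms TTL below are illustrative assumptions, not taken from this repository.
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

// event is a hypothetical caller-defined type used only for this sketch.
type event struct {
	name string
}

func main() {
	// Key entries by name; entries older than the TTL expire lazily on read.
	store := cache.NewTTLStore(func(obj interface{}) (string, error) {
		return obj.(event).name, nil
	}, 50*time.Millisecond)

	_ = store.Add(event{name: "node-ready"})
	if _, exists, _ := store.GetByKey("node-ready"); exists {
		fmt.Println("entry is still fresh")
	}

	time.Sleep(100 * time.Millisecond)
	// This read observes the stale timestamp and purges the entry.
	if _, exists, _ := store.GetByKey("node-ready"); !exists {
		fmt.Println("entry expired and was deleted")
	}
}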

View File

@ -1,54 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
)
type fakeThreadSafeMap struct {
ThreadSafeStore
deletedKeys chan<- string
}
func (c *fakeThreadSafeMap) Delete(key string) {
if c.deletedKeys != nil {
c.ThreadSafeStore.Delete(key)
c.deletedKeys <- key
}
}
type FakeExpirationPolicy struct {
NeverExpire sets.String
RetrieveKeyFunc KeyFunc
}
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
key, _ := p.RetrieveKeyFunc(obj)
return !p.NeverExpire.Has(key)
}
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
return &ExpirationCache{
cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys},
keyFunc: keyFunc,
clock: cacheClock,
expirationPolicy: expirationPolicy,
}
}

View File

@ -1,189 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
)
func TestTTLExpirationBasic(t *testing.T) {
testObj := testStoreObject{id: "foo", val: "bar"}
deleteChan := make(chan string, 1)
ttlStore := NewFakeExpirationStore(
testStoreKeyFunc, deleteChan,
&FakeExpirationPolicy{
NeverExpire: sets.NewString(),
RetrieveKeyFunc: func(obj interface{}) (string, error) {
return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
},
},
clock.RealClock{},
)
err := ttlStore.Add(testObj)
if err != nil {
t.Errorf("Unable to add obj %#v", testObj)
}
item, exists, err := ttlStore.Get(testObj)
if err != nil {
t.Errorf("Failed to get from store, %v", err)
}
if exists || item != nil {
t.Errorf("Got unexpected item %#v", item)
}
key, _ := testStoreKeyFunc(testObj)
select {
case delKey := <-deleteChan:
if delKey != key {
t.Errorf("Unexpected delete for key %s", key)
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Unexpected timeout waiting on delete")
}
close(deleteChan)
}
func TestReAddExpiredItem(t *testing.T) {
deleteChan := make(chan string, 1)
exp := &FakeExpirationPolicy{
NeverExpire: sets.NewString(),
RetrieveKeyFunc: func(obj interface{}) (string, error) {
return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
},
}
ttlStore := NewFakeExpirationStore(
testStoreKeyFunc, deleteChan, exp, clock.RealClock{})
testKey := "foo"
testObj := testStoreObject{id: testKey, val: "bar"}
err := ttlStore.Add(testObj)
if err != nil {
t.Errorf("Unable to add obj %#v", testObj)
}
// This get will expire the item.
item, exists, err := ttlStore.Get(testObj)
if err != nil {
t.Errorf("Failed to get from store, %v", err)
}
if exists || item != nil {
t.Errorf("Got unexpected item %#v", item)
}
key, _ := testStoreKeyFunc(testObj)
differentValue := "different_bar"
err = ttlStore.Add(
testStoreObject{id: testKey, val: differentValue})
if err != nil {
t.Errorf("Failed to add second value")
}
select {
case delKey := <-deleteChan:
if delKey != key {
t.Errorf("Unexpected delete for key %s", key)
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Unexpected timeout waiting on delete")
}
exp.NeverExpire = sets.NewString(testKey)
item, exists, err = ttlStore.GetByKey(testKey)
if err != nil {
t.Errorf("Failed to get from store, %v", err)
}
if !exists || item == nil || item.(testStoreObject).val != differentValue {
t.Errorf("Got unexpected item %#v", item)
}
close(deleteChan)
}
func TestTTLList(t *testing.T) {
testObjs := []testStoreObject{
{id: "foo", val: "bar"},
{id: "foo1", val: "bar1"},
{id: "foo2", val: "bar2"},
}
expireKeys := sets.NewString(testObjs[0].id, testObjs[2].id)
deleteChan := make(chan string, len(testObjs))
defer close(deleteChan)
ttlStore := NewFakeExpirationStore(
testStoreKeyFunc, deleteChan,
&FakeExpirationPolicy{
NeverExpire: sets.NewString(testObjs[1].id),
RetrieveKeyFunc: func(obj interface{}) (string, error) {
return obj.(*timestampedEntry).obj.(testStoreObject).id, nil
},
},
clock.RealClock{},
)
for _, obj := range testObjs {
err := ttlStore.Add(obj)
if err != nil {
t.Errorf("Unable to add obj %#v", obj)
}
}
listObjs := ttlStore.List()
if len(listObjs) != 1 || !reflect.DeepEqual(listObjs[0], testObjs[1]) {
t.Errorf("List returned unexpected results %#v", listObjs)
}
// Make sure all our deletes come through at an acceptable rate (1 per 100ms)
for expireKeys.Len() != 0 {
select {
case delKey := <-deleteChan:
if !expireKeys.Has(delKey) {
t.Errorf("Unexpected delete for key %s", delKey)
}
expireKeys.Delete(delKey)
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Unexpected timeout waiting on delete")
return
}
}
}
func TestTTLPolicy(t *testing.T) {
fakeTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
ttl := 30 * time.Second
exactlyOnTTL := fakeTime.Add(-ttl)
expiredTime := fakeTime.Add(-(ttl + 1))
policy := TTLPolicy{ttl, clock.NewFakeClock(fakeTime)}
fakeTimestampedEntry := &timestampedEntry{obj: struct{}{}, timestamp: exactlyOnTTL}
if policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL cache should not expire entries exactly on ttl")
}
fakeTimestampedEntry.timestamp = fakeTime
if policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL Cache should not expire entries before ttl")
}
fakeTimestampedEntry.timestamp = expiredTime
if !policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL Cache should expire entries older than ttl")
}
for _, ttl = range []time.Duration{0, -1} {
policy.Ttl = ttl
if policy.IsExpired(fakeTimestampedEntry) {
t.Errorf("TTL policy should only expire entries when initialized with a ttl > 0")
}
}
}

View File

@ -1,102 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
// FakeCustomStore lets you define custom functions for store operations
type FakeCustomStore struct {
AddFunc func(obj interface{}) error
UpdateFunc func(obj interface{}) error
DeleteFunc func(obj interface{}) error
ListFunc func() []interface{}
ListKeysFunc func() []string
GetFunc func(obj interface{}) (item interface{}, exists bool, err error)
GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
ReplaceFunc func(list []interface{}, resourceVersion string) error
ResyncFunc func() error
}
// Add calls the custom Add function if defined
func (f *FakeCustomStore) Add(obj interface{}) error {
if f.AddFunc != nil {
return f.AddFunc(obj)
}
return nil
}
// Update calls the custom Update function if defined
func (f *FakeCustomStore) Update(obj interface{}) error {
if f.UpdateFunc != nil {
return f.UpdateFunc(obj)
}
return nil
}
// Delete calls the custom Delete function if defined
func (f *FakeCustomStore) Delete(obj interface{}) error {
if f.DeleteFunc != nil {
return f.DeleteFunc(obj)
}
return nil
}
// List calls the custom List function if defined
func (f *FakeCustomStore) List() []interface{} {
if f.ListFunc != nil {
return f.ListFunc()
}
return nil
}
// ListKeys calls the custom ListKeys function if defined
func (f *FakeCustomStore) ListKeys() []string {
if f.ListKeysFunc != nil {
return f.ListKeysFunc()
}
return nil
}
// Get calls the custom Get function if defined
func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) {
if f.GetFunc != nil {
return f.GetFunc(obj)
}
return nil, false, nil
}
// GetByKey calls the custom GetByKey function if defined
func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) {
if f.GetByKeyFunc != nil {
return f.GetByKeyFunc(key)
}
return nil, false, nil
}
// Replace calls the custom Replace function if defined
func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error {
if f.ReplaceFunc != nil {
return f.ReplaceFunc(list, resourceVersion)
}
return nil
}
// Resync calls the custom Resync function if defined
func (f *FakeCustomStore) Resync() error {
if f.ResyncFunc != nil {
return f.ResyncFunc()
}
return nil
}
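A brief sketch of the intended testing pattern: stub only the hooks a test cares about and let every other Store method fall through as a no-op. The key "known" and the returned value are illustrative assumptions.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Stub only GetByKey; all other Store methods remain no-ops.
	store := &cache.FakeCustomStore{
		GetByKeyFunc: func(key string) (interface{}, bool, error) {
			if key == "known" {
				return "stubbed-value", true, nil
			}
			return nil, false, nil
		},
	}

	if item, exists, _ := store.GetByKey("known"); exists {
		fmt.Println("got:", item) // got: stubbed-value
	}
	if _, exists, _ := store.GetByKey("other"); !exists {
		fmt.Println("miss falls through as expected")
	}
}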

View File

@ -1,358 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
)
// PopProcessFunc is passed to Pop() method of Queue interface.
// It is supposed to process the element popped from the queue.
type PopProcessFunc func(interface{}) error
// ErrRequeue may be returned by a PopProcessFunc to safely requeue
// the current item. The value of Err will be returned from Pop.
type ErrRequeue struct {
// Err is returned by the Pop function
Err error
}
var FIFOClosedError = errors.New("DeltaFIFO: manipulating with closed queue")
func (e ErrRequeue) Error() string {
if e.Err == nil {
return "the popped item should be requeued without returning an error"
}
return e.Err.Error()
}
// Queue is exactly like a Store, but has a Pop() method too.
type Queue interface {
Store
// Pop blocks until it has something to process.
// It returns the object that was processed and the result of processing.
// The PopProcessFunc may return an ErrRequeue{...} to indicate the item
// should be requeued before releasing the lock on the queue.
Pop(PopProcessFunc) (interface{}, error)
// AddIfNotPresent adds a value previously
// returned by Pop back into the queue as long
// as nothing else (presumably more recent)
// has since been added.
AddIfNotPresent(interface{}) error
// HasSynced returns true if the first batch of items has been popped
HasSynced() bool
// Close closes the queue
Close()
}
// Helper function for popping from Queue.
// WARNING: Do NOT use this function in non-test code to avoid races
// unless you really really really really know what you are doing.
func Pop(queue Queue) interface{} {
var result interface{}
queue.Pop(func(obj interface{}) error {
result = obj
return nil
})
return result
}
// FIFO receives adds and updates from a Reflector, and puts them in a queue for
// FIFO order processing. If multiple adds/updates of a single item happen while
// an item is in the queue before it has been processed, it will only be
// processed once, and when it is processed, the most recent version will be
// processed. This can't be done with a channel.
//
// FIFO solves this use case:
// * You want to process every object (exactly) once.
// * You want to process the most recent version of the object when you process it.
// * You do not want to process deleted objects, they should be removed from the queue.
// * You do not want to periodically reprocess objects.
// Compare with DeltaFIFO for other use cases.
type FIFO struct {
lock sync.RWMutex
cond sync.Cond
// We depend on the property that items in the set are in the queue and vice versa.
items map[string]interface{}
queue []string
// populated is true once the first batch of items has been inserted by Replace(),
// or once Delete/Add/Update was called first.
populated bool
// initialPopulationCount is the number of items inserted by the first call of Replace()
initialPopulationCount int
// keyFunc is used to make the key used for queued item insertion and retrieval, and
// should be deterministic.
keyFunc KeyFunc
// Indication the queue is closed.
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
// Currently, not used to gate any of the CRUD operations.
closed bool
closedLock sync.Mutex
}
var (
_ = Queue(&FIFO{}) // FIFO is a Queue
)
// Close the queue.
func (f *FIFO) Close() {
f.closedLock.Lock()
defer f.closedLock.Unlock()
f.closed = true
f.cond.Broadcast()
}
// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
// or if the first batch of items inserted by Replace() has been popped.
func (f *FIFO) HasSynced() bool {
f.lock.Lock()
defer f.lock.Unlock()
return f.populated && f.initialPopulationCount == 0
}
// Add inserts an item, and puts it in the queue. The item is only enqueued
// if it doesn't already exist in the set.
func (f *FIFO) Add(obj interface{}) error {
id, err := f.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
if _, exists := f.items[id]; !exists {
f.queue = append(f.queue, id)
}
f.items[id] = obj
f.cond.Broadcast()
return nil
}
// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
// present in the set, it is neither enqueued nor added to the set.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (f *FIFO) AddIfNotPresent(obj interface{}) error {
id, err := f.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.addIfNotPresent(id, obj)
return nil
}
// addIfNotPresent assumes the fifo lock is already held and adds the provided
// item to the queue under id if it does not already exist.
func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
f.populated = true
if _, exists := f.items[id]; exists {
return
}
f.queue = append(f.queue, id)
f.items[id] = obj
f.cond.Broadcast()
}
// Update is the same as Add in this implementation.
func (f *FIFO) Update(obj interface{}) error {
return f.Add(obj)
}
// Delete removes an item. It doesn't add it to the queue, because
// this implementation assumes the consumer only cares about the objects,
// not the order in which they were created/added.
func (f *FIFO) Delete(obj interface{}) error {
id, err := f.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
f.lock.Lock()
defer f.lock.Unlock()
f.populated = true
delete(f.items, id)
return err
}
// List returns a list of all the items.
func (f *FIFO) List() []interface{} {
f.lock.RLock()
defer f.lock.RUnlock()
list := make([]interface{}, 0, len(f.items))
for _, item := range f.items {
list = append(list, item)
}
return list
}
// ListKeys returns a list of all the keys of the objects currently
// in the FIFO.
func (f *FIFO) ListKeys() []string {
f.lock.RLock()
defer f.lock.RUnlock()
list := make([]string, 0, len(f.items))
for key := range f.items {
list = append(list, key)
}
return list
}
// Get returns the requested item, or sets exists=false.
func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
key, err := f.keyFunc(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
return f.GetByKey(key)
}
// GetByKey returns the requested item, or sets exists=false.
func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
f.lock.RLock()
defer f.lock.RUnlock()
item, exists = f.items[key]
return item, exists, nil
}
// IsClosed checks if the queue is closed
func (f *FIFO) IsClosed() bool {
f.closedLock.Lock()
defer f.closedLock.Unlock()
return f.closed
}
// Pop waits until an item is ready and processes it. If multiple items are
// ready, they are returned in the order in which they were added/updated.
// The item is removed from the queue (and the store) before it is processed,
// so if you don't successfully process it, it should be added back with
// AddIfNotPresent(). The process function is called under lock, so it is safe
// to update data structures in it that need to be in sync with the queue.
func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
f.lock.Lock()
defer f.lock.Unlock()
for {
for len(f.queue) == 0 {
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
// When Close() is called, the f.closed is set and the condition is broadcasted.
// Which causes this loop to continue and return from the Pop().
if f.IsClosed() {
return nil, FIFOClosedError
}
f.cond.Wait()
}
id := f.queue[0]
f.queue = f.queue[1:]
if f.initialPopulationCount > 0 {
f.initialPopulationCount--
}
item, ok := f.items[id]
if !ok {
// Item may have been deleted subsequently.
continue
}
delete(f.items, id)
err := process(item)
if e, ok := err.(ErrRequeue); ok {
f.addIfNotPresent(id, item)
err = e.Err
}
return item, err
}
}
// Replace will delete the contents of 'f', using instead the given map.
// 'f' takes ownership of the map, you should not reference the map again
// after calling this function. f's queue is reset, too; upon return, it
// will contain the items in the map, in no particular order.
func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
items := make(map[string]interface{}, len(list))
for _, item := range list {
key, err := f.keyFunc(item)
if err != nil {
return KeyError{item, err}
}
items[key] = item
}
f.lock.Lock()
defer f.lock.Unlock()
if !f.populated {
f.populated = true
f.initialPopulationCount = len(items)
}
f.items = items
f.queue = f.queue[:0]
for id := range items {
f.queue = append(f.queue, id)
}
if len(f.queue) > 0 {
f.cond.Broadcast()
}
return nil
}
// Resync will touch all objects to put them into the processing queue
func (f *FIFO) Resync() error {
f.lock.Lock()
defer f.lock.Unlock()
inQueue := sets.NewString()
for _, id := range f.queue {
inQueue.Insert(id)
}
for id := range f.items {
if !inQueue.Has(id) {
f.queue = append(f.queue, id)
}
}
if len(f.queue) > 0 {
f.cond.Broadcast()
}
return nil
}
// NewFIFO returns a Store which can be used to queue up items to
// process.
func NewFIFO(keyFunc KeyFunc) *FIFO {
f := &FIFO{
items: map[string]interface{}{},
queue: []string{},
keyFunc: keyFunc,
}
f.cond.L = &f.lock
return f
}
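A short sketch of the requeue flow described in Pop's comment above; the task type and its key function are illustrative assumptions.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// task is a hypothetical item type; keys come from its name field.
type task struct {
	name string
}

func main() {
	f := cache.NewFIFO(func(obj interface{}) (string, error) {
		return obj.(task).name, nil
	})
	_ = f.Add(task{name: "alpha"})

	// Returning ErrRequeue from the process function puts the item back
	// in the queue instead of dropping it.
	_, _ = f.Pop(func(obj interface{}) error {
		fmt.Println("first attempt:", obj.(task).name)
		return cache.ErrRequeue{Err: nil}
	})

	// The second Pop sees the requeued item and processes it for real.
	_, _ = f.Pop(func(obj interface{}) error {
		fmt.Println("second attempt:", obj.(task).name)
		return nil
	})
}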

View File

@ -1,280 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"reflect"
"testing"
"time"
)
func testFifoObjectKeyFunc(obj interface{}) (string, error) {
return obj.(testFifoObject).name, nil
}
type testFifoObject struct {
name string
val interface{}
}
func mkFifoObj(name string, val interface{}) testFifoObject {
return testFifoObject{name: name, val: val}
}
func TestFIFO_basic(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
const amount = 500
go func() {
for i := 0; i < amount; i++ {
f.Add(mkFifoObj(string([]rune{'a', rune(i)}), i+1))
}
}()
go func() {
for u := uint64(0); u < amount; u++ {
f.Add(mkFifoObj(string([]rune{'b', rune(u)}), u+1))
}
}()
lastInt := int(0)
lastUint := uint64(0)
for i := 0; i < amount*2; i++ {
switch obj := Pop(f).(testFifoObject).val.(type) {
case int:
if obj <= lastInt {
t.Errorf("got %v (int) out of order, last was %v", obj, lastInt)
}
lastInt = obj
case uint64:
if obj <= lastUint {
t.Errorf("got %v (uint) out of order, last was %v", obj, lastUint)
} else {
lastUint = obj
}
default:
t.Fatalf("unexpected type %#v", obj)
}
}
}
func TestFIFO_requeueOnPop(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
_, err := f.Pop(func(obj interface{}) error {
if obj.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: nil}
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return ErrRequeue{Err: fmt.Errorf("test error")}
})
if err == nil || err.Error() != "test error" {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); !ok || err != nil {
t.Fatalf("object should have been requeued: %t %v", ok, err)
}
_, err = f.Pop(func(obj interface{}) error {
if obj.(testFifoObject).name != "foo" {
t.Fatalf("unexpected object: %#v", obj)
}
return nil
})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if _, ok, err := f.GetByKey("foo"); ok || err != nil {
t.Fatalf("object should have been removed: %t %v", ok, err)
}
}
func TestFIFO_addUpdate(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
f.Update(mkFifoObj("foo", 15))
if e, a := []interface{}{mkFifoObj("foo", 15)}, f.List(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
if e, a := []string{"foo"}, f.ListKeys(); !reflect.DeepEqual(e, a) {
t.Errorf("Expected %+v, got %+v", e, a)
}
got := make(chan testFifoObject, 2)
go func() {
for {
got <- Pop(f).(testFifoObject)
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestFIFO_addReplace(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
f.Replace([]interface{}{mkFifoObj("foo", 15)}, "15")
got := make(chan testFifoObject, 2)
go func() {
for {
got <- Pop(f).(testFifoObject)
}
}()
first := <-got
if e, a := 15, first.val; e != a {
t.Errorf("Didn't get updated value (%v), got %v", e, a)
}
select {
case unexpected := <-got:
t.Errorf("Got second value %v", unexpected.val)
case <-time.After(50 * time.Millisecond):
}
_, exists, _ := f.Get(mkFifoObj("foo", ""))
if exists {
t.Errorf("item did not get removed")
}
}
func TestFIFO_detectLineJumpers(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("foo", 10))
f.Add(mkFifoObj("bar", 1))
f.Add(mkFifoObj("foo", 11))
f.Add(mkFifoObj("foo", 13))
f.Add(mkFifoObj("zab", 30))
if e, a := 13, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
f.Add(mkFifoObj("foo", 14)) // ensure foo doesn't jump back in line
if e, a := 1, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 30, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if e, a := 14, Pop(f).(testFifoObject).val; a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
func TestFIFO_addIfNotPresent(t *testing.T) {
f := NewFIFO(testFifoObjectKeyFunc)
f.Add(mkFifoObj("a", 1))
f.Add(mkFifoObj("b", 2))
f.AddIfNotPresent(mkFifoObj("b", 3))
f.AddIfNotPresent(mkFifoObj("c", 4))
if e, a := 3, len(f.items); a != e {
t.Fatalf("expected queue length %d, got %d", e, a)
}
expectedValues := []int{1, 2, 4}
for _, expected := range expectedValues {
if actual := Pop(f).(testFifoObject).val; actual != expected {
t.Fatalf("expected value %d, got %d", expected, actual)
}
}
}
func TestFIFO_HasSynced(t *testing.T) {
tests := []struct {
actions []func(f *FIFO)
expectedSynced bool
}{
{
actions: []func(f *FIFO){},
expectedSynced: false,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Add(mkFifoObj("a", 1)) },
},
expectedSynced: true,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{}, "0") },
},
expectedSynced: true,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
},
expectedSynced: false,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *FIFO) { Pop(f) },
},
expectedSynced: false,
},
{
actions: []func(f *FIFO){
func(f *FIFO) { f.Replace([]interface{}{mkFifoObj("a", 1), mkFifoObj("b", 2)}, "0") },
func(f *FIFO) { Pop(f) },
func(f *FIFO) { Pop(f) },
},
expectedSynced: true,
},
}
for i, test := range tests {
f := NewFIFO(testFifoObjectKeyFunc)
for _, action := range test.actions {
action(f)
}
if e, a := test.expectedSynced, f.HasSynced(); a != e {
t.Errorf("test case %v failed, expected: %v, got %v", i, e, a)
}
}
}

View File

@ -1,323 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file implements a heap data structure.
package cache
import (
"container/heap"
"fmt"
"sync"
)
const (
closedMsg = "heap is closed"
)
type LessFunc func(interface{}, interface{}) bool
type heapItem struct {
obj interface{} // The object which is stored in the heap.
index int // The index of the object's key in the Heap.queue.
}
type itemKeyValue struct {
key string
obj interface{}
}
// heapData is an internal struct that implements the standard heap interface
// and keeps the data stored in the heap.
type heapData struct {
// items is a map from key of the objects to the objects and their index.
// We depend on the property that items in the map are in the queue and vice versa.
items map[string]*heapItem
// queue implements a heap data structure and keeps the order of elements
// according to the heap invariant. The queue keeps the keys of objects stored
// in "items".
queue []string
// keyFunc is used to make the key used for queued item insertion and retrieval, and
// should be deterministic.
keyFunc KeyFunc
// lessFunc is used to compare two objects in the heap.
lessFunc LessFunc
}
var (
_ = heap.Interface(&heapData{}) // heapData is a standard heap
)
// Less compares two objects and returns true if the first one should go
// in front of the second one in the heap.
func (h *heapData) Less(i, j int) bool {
if i >= len(h.queue) || j >= len(h.queue) {
return false
}
itemi, ok := h.items[h.queue[i]]
if !ok {
return false
}
itemj, ok := h.items[h.queue[j]]
if !ok {
return false
}
return h.lessFunc(itemi.obj, itemj.obj)
}
// Len returns the number of items in the Heap.
func (h *heapData) Len() int { return len(h.queue) }
// Swap implements swapping of two elements in the heap. This is a part of the
// standard heap interface and should never be called directly.
func (h *heapData) Swap(i, j int) {
h.queue[i], h.queue[j] = h.queue[j], h.queue[i]
item := h.items[h.queue[i]]
item.index = i
item = h.items[h.queue[j]]
item.index = j
}
// Push is supposed to be called by heap.Push only.
func (h *heapData) Push(kv interface{}) {
keyValue := kv.(*itemKeyValue)
n := len(h.queue)
h.items[keyValue.key] = &heapItem{keyValue.obj, n}
h.queue = append(h.queue, keyValue.key)
}
// Pop is supposed to be called by heap.Pop only.
func (h *heapData) Pop() interface{} {
key := h.queue[len(h.queue)-1]
h.queue = h.queue[0 : len(h.queue)-1]
item, ok := h.items[key]
if !ok {
// This is an error
return nil
}
delete(h.items, key)
return item.obj
}
// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
// It can be used to implement priority queues and similar data structures.
type Heap struct {
lock sync.RWMutex
cond sync.Cond
// data stores objects and has a queue that keeps their ordering according
// to the heap invariant.
data *heapData
// closed indicates that the queue is closed.
// It is mainly used to let Pop() exit its control loop while waiting for an item.
closed bool
}
// Close closes the Heap and signals any condition variables that may be waiting
// to pop items from the heap.
func (h *Heap) Close() {
h.lock.Lock()
defer h.lock.Unlock()
h.closed = true
h.cond.Broadcast()
}
// Add inserts an item, and puts it in the queue. The item is updated if it
// already exists.
func (h *Heap) Add(obj interface{}) error {
key, err := h.data.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
h.lock.Lock()
defer h.lock.Unlock()
if h.closed {
return fmt.Errorf(closedMsg)
}
if _, exists := h.data.items[key]; exists {
h.data.items[key].obj = obj
heap.Fix(h.data, h.data.items[key].index)
} else {
h.addIfNotPresentLocked(key, obj)
}
h.cond.Broadcast()
return nil
}
// BulkAdd adds all the items in the list to the queue and then signals the
// condition variable. It is useful when the caller would like to add all of the
// items to the queue before the consumer starts processing them.
func (h *Heap) BulkAdd(list []interface{}) error {
h.lock.Lock()
defer h.lock.Unlock()
if h.closed {
return fmt.Errorf(closedMsg)
}
for _, obj := range list {
key, err := h.data.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
if _, exists := h.data.items[key]; exists {
h.data.items[key].obj = obj
heap.Fix(h.data, h.data.items[key].index)
} else {
h.addIfNotPresentLocked(key, obj)
}
}
h.cond.Broadcast()
return nil
}
// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
// the key is present in the map, no changes are made to the item.
//
// This is useful in a single producer/consumer scenario so that the consumer can
// safely retry items without contending with the producer and potentially enqueueing
// stale items.
func (h *Heap) AddIfNotPresent(obj interface{}) error {
id, err := h.data.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
h.lock.Lock()
defer h.lock.Unlock()
if h.closed {
return fmt.Errorf(closedMsg)
}
h.addIfNotPresentLocked(id, obj)
h.cond.Broadcast()
return nil
}
// addIfNotPresentLocked assumes the lock is already held and adds the provided
// item to the queue if it does not already exist.
func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
if _, exists := h.data.items[key]; exists {
return
}
heap.Push(h.data, &itemKeyValue{key, obj})
}
// Update is the same as Add in this implementation. When the item does not
// exist, it is added.
func (h *Heap) Update(obj interface{}) error {
return h.Add(obj)
}
// Delete removes an item.
func (h *Heap) Delete(obj interface{}) error {
key, err := h.data.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
h.lock.Lock()
defer h.lock.Unlock()
if item, ok := h.data.items[key]; ok {
heap.Remove(h.data, item.index)
return nil
}
return fmt.Errorf("object not found")
}
// Pop waits until an item is ready. If multiple items are
// ready, they are returned in the order given by Heap.data.lessFunc.
func (h *Heap) Pop() (interface{}, error) {
h.lock.Lock()
defer h.lock.Unlock()
for len(h.data.queue) == 0 {
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
// When Close() is called, the h.closed is set and the condition is broadcast,
// which causes this loop to continue and return from the Pop().
if h.closed {
return nil, fmt.Errorf(closedMsg)
}
h.cond.Wait()
}
obj := heap.Pop(h.data)
if obj != nil {
return obj, nil
} else {
return nil, fmt.Errorf("object was removed from heap data")
}
}
// List returns a list of all the items.
func (h *Heap) List() []interface{} {
h.lock.RLock()
defer h.lock.RUnlock()
list := make([]interface{}, 0, len(h.data.items))
for _, item := range h.data.items {
list = append(list, item.obj)
}
return list
}
// ListKeys returns a list of all the keys of the objects currently in the Heap.
func (h *Heap) ListKeys() []string {
h.lock.RLock()
defer h.lock.RUnlock()
list := make([]string, 0, len(h.data.items))
for key := range h.data.items {
list = append(list, key)
}
return list
}
// Get returns the requested item, or sets exists=false.
func (h *Heap) Get(obj interface{}) (interface{}, bool, error) {
key, err := h.data.keyFunc(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
return h.GetByKey(key)
}
// GetByKey returns the requested item, or sets exists=false.
func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
h.lock.RLock()
defer h.lock.RUnlock()
item, exists := h.data.items[key]
if !exists {
return nil, false, nil
}
return item.obj, true, nil
}
// IsClosed returns true if the queue is closed.
func (h *Heap) IsClosed() bool {
h.lock.RLock()
defer h.lock.RUnlock()
return h.closed
}
// NewHeap returns a Heap which can be used to queue up items to process.
func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap {
h := &Heap{
data: &heapData{
items: map[string]*heapItem{},
queue: []string{},
keyFunc: keyFn,
lessFunc: lessFn,
},
}
h.cond.L = &h.lock
return h
}
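A minimal priority-queue sketch built on NewHeap; the job type, its priority field, and the ordering function are illustrative assumptions, not part of this file.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// job is a hypothetical item type; lower priority values pop first.
type job struct {
	name     string
	priority int
}

func main() {
	h := cache.NewHeap(
		func(obj interface{}) (string, error) { return obj.(job).name, nil },
		func(a, b interface{}) bool { return a.(job).priority < b.(job).priority },
	)
	_ = h.Add(job{name: "low", priority: 10})
	_ = h.Add(job{name: "high", priority: 1})
	_ = h.Add(job{name: "low", priority: 0}) // same key: updates "low" and re-heapifies

	// Items pop in lessFunc order: "low" (priority 0) first, then "high".
	for i := 0; i < 2; i++ {
		obj, err := h.Pop()
		if err != nil {
			break
		}
		j := obj.(job)
		fmt.Println(j.name, j.priority)
	}
}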

View File

@ -1,382 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"testing"
"time"
)
func testHeapObjectKeyFunc(obj interface{}) (string, error) {
return obj.(testHeapObject).name, nil
}
type testHeapObject struct {
name string
val interface{}
}
func mkHeapObj(name string, val interface{}) testHeapObject {
return testHeapObject{name: name, val: val}
}
func compareInts(val1 interface{}, val2 interface{}) bool {
first := val1.(testHeapObject).val.(int)
second := val2.(testHeapObject).val.(int)
return first < second
}
// TestHeapBasic tests Heap invariant and synchronization.
func TestHeapBasic(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
var wg sync.WaitGroup
wg.Add(2)
const amount = 500
var i, u int
// Insert items in the heap in opposite orders in two goroutines.
go func() {
for i = amount; i > 0; i-- {
h.Add(mkHeapObj(string([]rune{'a', rune(i)}), i))
}
wg.Done()
}()
go func() {
for u = 0; u < amount; u++ {
h.Add(mkHeapObj(string([]rune{'b', rune(u)}), u+1))
}
wg.Done()
}()
// Wait for the two goroutines to finish.
wg.Wait()
// Make sure that the numbers are popped in ascending order.
prevNum := 0
for i := 0; i < amount*2; i++ {
obj, err := h.Pop()
num := obj.(testHeapObject).val.(int)
// All the items must be sorted.
if err != nil || prevNum > num {
t.Errorf("got %v out of order, last was %v", obj, prevNum)
}
prevNum = num
}
}
// Tests Heap.Add and ensures that heap invariant is preserved after adding items.
func TestHeap_Add(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("baz", 11))
h.Add(mkHeapObj("zab", 30))
h.Add(mkHeapObj("foo", 13)) // This updates "foo".
item, err := h.Pop()
if e, a := 1, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 11, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
h.Delete(mkHeapObj("baz", 11)) // Nothing is deleted.
h.Add(mkHeapObj("foo", 14)) // foo is updated.
item, err = h.Pop()
if e, a := 14, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 30, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
// TestHeap_BulkAdd tests Heap.BulkAdd functionality and ensures that all the
// items given to BulkAdd are added to the queue before Pop reads them.
func TestHeap_BulkAdd(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
const amount = 500
// Insert items in the heap in opposite orders in a goroutine.
go func() {
l := []interface{}{}
for i := amount; i > 0; i-- {
l = append(l, mkHeapObj(string([]rune{'a', rune(i)}), i))
}
h.BulkAdd(l)
}()
prevNum := -1
for i := 0; i < amount; i++ {
obj, err := h.Pop()
num := obj.(testHeapObject).val.(int)
// All the items must be sorted.
if err != nil || prevNum >= num {
t.Errorf("got %v out of order, last was %v", obj, prevNum)
}
prevNum = num
}
}
// TestHeapEmptyPop tests that pop returns properly after heap is closed.
func TestHeapEmptyPop(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
go func() {
time.Sleep(1 * time.Second)
h.Close()
}()
_, err := h.Pop()
if err == nil || err.Error() != closedMsg {
t.Errorf("pop should have returned heap closed error: %v", err)
}
}
// TestHeap_AddIfNotPresent tests Heap.AddIfNotPresent and ensures that heap
// invariant is preserved after adding items.
func TestHeap_AddIfNotPresent(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.AddIfNotPresent(mkHeapObj("foo", 10))
h.AddIfNotPresent(mkHeapObj("bar", 1))
h.AddIfNotPresent(mkHeapObj("baz", 11))
h.AddIfNotPresent(mkHeapObj("zab", 30))
h.AddIfNotPresent(mkHeapObj("foo", 13)) // This is not added.
if n := len(h.data.items); n != 4 {
t.Errorf("unexpected number of items: %d", n)
}
if val := h.data.items["foo"].obj.(testHeapObject).val; val != 10 {
t.Errorf("unexpected value: %d", val)
}
item, err := h.Pop()
if e, a := 1, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 10, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
// bar is already popped. Let's add another one.
h.AddIfNotPresent(mkHeapObj("bar", 14))
item, err = h.Pop()
if e, a := 11, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 14, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
}
// TestHeap_Delete tests Heap.Delete and ensures that heap invariant is
// preserved after deleting items.
func TestHeap_Delete(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
// Delete head. Delete should work with "key" and doesn't care about the value.
if err := h.Delete(mkHeapObj("bar", 200)); err != nil {
t.Fatalf("Failed to delete head.")
}
item, err := h.Pop()
if e, a := 10, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
h.Add(mkHeapObj("zab", 30))
h.Add(mkHeapObj("faz", 30))
n := h.data.Len()
// Delete non-existing item.
if err = h.Delete(mkHeapObj("non-existent", 10)); err == nil || n != h.data.Len() {
t.Fatalf("Didn't expect any item removal")
}
}
// Delete tail.
if err = h.Delete(mkHeapObj("bal", 31)); err != nil {
t.Fatalf("Failed to delete tail.")
}
// Delete one of the items with value 30.
if err = h.Delete(mkHeapObj("zab", 30)); err != nil {
t.Fatalf("Failed to delete item.")
}
item, err = h.Pop()
if e, a := 11, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
item, err = h.Pop()
if e, a := 30, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
if h.data.Len() != 0 {
t.Fatalf("expected an empty heap.")
}
}
// TestHeap_Update tests Heap.Update and ensures that heap invariant is
// preserved after adding items.
func TestHeap_Update(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
// Update an item to a value that should push it to the head.
h.Update(mkHeapObj("baz", 0))
if h.data.queue[0] != "baz" || h.data.items["baz"].index != 0 {
t.Fatalf("expected baz to be at the head")
}
item, err := h.Pop()
if e, a := 0, item.(testHeapObject).val; err != nil || a != e {
t.Fatalf("expected %d, got %d", e, a)
}
// Update bar to push it farther back in the queue.
h.Update(mkHeapObj("bar", 100))
if h.data.queue[0] != "foo" || h.data.items["foo"].index != 0 {
t.Fatalf("expected foo to be at the head")
}
}
// TestHeap_Get tests Heap.Get.
func TestHeap_Get(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
// Get works with the key.
obj, exists, err := h.Get(mkHeapObj("baz", 0))
if err != nil || !exists || obj.(testHeapObject).val != 11 {
t.Fatalf("unexpected error in getting element")
}
// Get non-existing object.
_, exists, err = h.Get(mkHeapObj("non-existing", 0))
if err != nil || exists {
t.Fatalf("didn't expect to get any object")
}
}
// TestHeap_GetByKey tests Heap.GetByKey and is very similar to TestHeap_Get.
func TestHeap_GetByKey(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
h.Add(mkHeapObj("bal", 31))
h.Add(mkHeapObj("baz", 11))
obj, exists, err := h.GetByKey("baz")
if err != nil || !exists || obj.(testHeapObject).val != 11 {
t.Fatalf("unexpected error in getting element")
}
// Get non-existing object.
_, exists, err = h.GetByKey("non-existing")
if err != nil || exists {
t.Fatalf("didn't expect to get any object")
}
}
// TestHeap_Close tests Heap.Close and Heap.IsClosed functions.
func TestHeap_Close(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Add(mkHeapObj("foo", 10))
h.Add(mkHeapObj("bar", 1))
if h.IsClosed() {
t.Fatalf("didn't expect heap to be closed")
}
h.Close()
if !h.IsClosed() {
t.Fatalf("expect heap to be closed")
}
}
// TestHeap_List tests Heap.List function.
func TestHeap_List(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
list := h.List()
if len(list) != 0 {
t.Errorf("expected an empty list")
}
items := map[string]int{
"foo": 10,
"bar": 1,
"bal": 30,
"baz": 11,
"faz": 30,
}
for k, v := range items {
h.Add(mkHeapObj(k, v))
}
list = h.List()
if len(list) != len(items) {
t.Errorf("expected %d items, got %d", len(items), len(list))
}
for _, obj := range list {
heapObj := obj.(testHeapObject)
v, ok := items[heapObj.name]
if !ok || v != heapObj.val {
t.Errorf("unexpected item in the list: %v", heapObj)
}
}
}
// TestHeap_ListKeys tests the Heap.ListKeys function. The scenario is the same as
// in TestHeap_List.
func TestHeap_ListKeys(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
list := h.ListKeys()
if len(list) != 0 {
t.Errorf("expected an empty list")
}
items := map[string]int{
"foo": 10,
"bar": 1,
"bal": 30,
"baz": 11,
"faz": 30,
}
for k, v := range items {
h.Add(mkHeapObj(k, v))
}
list = h.ListKeys()
if len(list) != len(items) {
t.Errorf("expected %d items, got %d", len(items), len(list))
}
for _, key := range list {
_, ok := items[key]
if !ok {
t.Errorf("unexpected item in the list: %v", key)
}
}
}
// TestHeapAddAfterClose tests that heap returns an error if anything is added
// after it is closed.
func TestHeapAddAfterClose(t *testing.T) {
h := NewHeap(testHeapObjectKeyFunc, compareInts)
h.Close()
if err := h.Add(mkHeapObj("test", 1)); err == nil || err.Error() != closedMsg {
t.Errorf("expected heap closed error")
}
if err := h.AddIfNotPresent(mkHeapObj("test", 1)); err == nil || err.Error() != closedMsg {
t.Errorf("expected heap closed error")
}
if err := h.BulkAdd([]interface{}{mkHeapObj("test", 1)}); err == nil || err.Error() != closedMsg {
t.Errorf("expected heap closed error")
}
}

View File

@ -1,87 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/sets"
)
// Indexer is a storage interface that lets you list objects using multiple indexing functions
type Indexer interface {
Store
// Retrieve list of objects that match on the named indexing function
Index(indexName string, obj interface{}) ([]interface{}, error)
// IndexKeys returns the set of keys that match on the named indexing function.
IndexKeys(indexName, indexKey string) ([]string, error)
// ListIndexFuncValues returns the list of generated values of an Index func
ListIndexFuncValues(indexName string) []string
// ByIndex lists objects that match on the named indexing function with the exact key
ByIndex(indexName, indexKey string) ([]interface{}, error)
// GetIndexers returns the indexers
GetIndexers() Indexers
// AddIndexers adds more indexers to this store. If you call this after you already have data
// in the store, the results are undefined.
AddIndexers(newIndexers Indexers) error
}
// IndexFunc knows how to provide an indexed value for an object.
type IndexFunc func(obj interface{}) ([]string, error)
// IndexFuncToKeyFuncAdapter adapts an IndexFunc to a KeyFunc. This is only useful if your index function returns
// unique values for every object. The conversion can return an error when more than one key is found. You
// should prefer to write proper key and index functions.
func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
return func(obj interface{}) (string, error) {
indexKeys, err := indexFunc(obj)
if err != nil {
return "", err
}
if len(indexKeys) > 1 {
return "", fmt.Errorf("too many keys: %v", indexKeys)
}
if len(indexKeys) == 0 {
return "", fmt.Errorf("unexpected empty indexKeys")
}
return indexKeys[0], nil
}
}
const (
NamespaceIndex string = "namespace"
)
// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace
func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
meta, err := meta.Accessor(obj)
if err != nil {
return []string{""}, fmt.Errorf("object has no meta: %v", err)
}
return []string{meta.GetNamespace()}, nil
}
// Index maps the indexed value to a set of keys in the store that match on that value
type Index map[string]sets.String
// Indexers maps a name to a IndexFunc
type Indexers map[string]IndexFunc
// Indices maps a name to an Index
type Indices map[string]Index
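A brief sketch of plugging a custom IndexFunc into an Indexer and querying it with ByIndex; NewIndexer and MetaNamespaceKeyFunc come from elsewhere in this package, while the "byApp" index name and the app label are illustrative assumptions.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index pods by a hypothetical "app" label, in addition to the
	// default namespace/name storage key.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"byApp": func(obj interface{}) ([]string, error) {
			return []string{obj.(*v1.Pod).Labels["app"]}, nil
		},
	})

	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web-1", Labels: map[string]string{"app": "web"}}})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "db-1", Labels: map[string]string{"app": "db"}}})

	// ByIndex returns only the objects whose indexed value matches exactly.
	webPods, _ := indexer.ByIndex("byApp", "web")
	fmt.Println("web pods:", len(webPods)) // web pods: 1
}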

View File

@ -1,134 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/apimachinery/pkg/util/sets"
"strings"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func testIndexFunc(obj interface{}) ([]string, error) {
pod := obj.(*v1.Pod)
return []string{pod.Labels["foo"]}, nil
}
func TestGetIndexFuncValues(t *testing.T) {
index := NewIndexer(MetaNamespaceKeyFunc, Indexers{"testmodes": testIndexFunc})
pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "one", Labels: map[string]string{"foo": "bar"}}}
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "two", Labels: map[string]string{"foo": "bar"}}}
pod3 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "tre", Labels: map[string]string{"foo": "biz"}}}
index.Add(pod1)
index.Add(pod2)
index.Add(pod3)
keys := index.ListIndexFuncValues("testmodes")
if len(keys) != 2 {
t.Errorf("Expected 2 keys but got %v", len(keys))
}
for _, key := range keys {
if key != "bar" && key != "biz" {
t.Errorf("Expected only 'bar' or 'biz' but got %s", key)
}
}
}
func testUsersIndexFunc(obj interface{}) ([]string, error) {
pod := obj.(*v1.Pod)
usersString := pod.Annotations["users"]
return strings.Split(usersString, ","), nil
}
func TestMultiIndexKeys(t *testing.T) {
index := NewIndexer(MetaNamespaceKeyFunc, Indexers{"byUser": testUsersIndexFunc})
pod1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "one", Annotations: map[string]string{"users": "ernie,bert"}}}
pod2 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "two", Annotations: map[string]string{"users": "bert,oscar"}}}
pod3 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "tre", Annotations: map[string]string{"users": "ernie,elmo"}}}
index.Add(pod1)
index.Add(pod2)
index.Add(pod3)
expected := map[string]sets.String{}
expected["ernie"] = sets.NewString("one", "tre")
expected["bert"] = sets.NewString("one", "two")
expected["elmo"] = sets.NewString("tre")
expected["oscar"] = sets.NewString("two")
expected["elmo"] = sets.NewString() // let's just make sure we don't get anything back in this case
{
for k, v := range expected {
found := sets.String{}
indexResults, err := index.ByIndex("byUser", k)
if err != nil {
t.Errorf("Unexpected error %v", err)
}
for _, item := range indexResults {
found.Insert(item.(*v1.Pod).Name)
}
items := v.List()
if !found.HasAll(items...) {
t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
}
}
}
index.Delete(pod3)
erniePods, err := index.ByIndex("byUser", "ernie")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(erniePods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(erniePods))
}
for _, erniePod := range erniePods {
if erniePod.(*v1.Pod).Name != "one" {
t.Errorf("Expected only 'one' but got %s", erniePod.(*v1.Pod).Name)
}
}
elmoPods, err := index.ByIndex("byUser", "elmo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(elmoPods) != 0 {
t.Errorf("Expected 0 pods but got %v", len(elmoPods))
}
copyOfPod2 := pod2.DeepCopy()
copyOfPod2.Annotations["users"] = "oscar"
index.Update(copyOfPod2)
bertPods, err := index.ByIndex("byUser", "bert")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(bertPods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(bertPods))
}
for _, bertPod := range bertPods {
if bertPod.(*v1.Pod).Name != "one" {
t.Errorf("Expected only 'one' but got %s", bertPod.(*v1.Pod).Name)
}
}
}


@ -1,160 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// AppendFunc is used to add a matching item to whatever list the caller is using
type AppendFunc func(interface{})
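// ListAll calls appendFn with each value retrieved from store which matches the selector.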
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
for _, m := range store.List() {
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
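// ListAllByNamespace calls appendFn with each value retrieved from indexer that lives in the given
// namespace and matches the selector; for metav1.NamespaceAll it lists across all namespaces.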
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
if namespace == metav1.NamespaceAll {
for _, m := range indexer.List() {
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
if err != nil {
// Ignore error; do slow search without index.
klog.Warningf("can not retrieve list of objects using index : %v", err)
for _, m := range indexer.List() {
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
for _, m := range items {
metadata, err := meta.Accessor(m)
if err != nil {
return err
}
if selector.Matches(labels.Set(metadata.GetLabels())) {
appendFn(m)
}
}
return nil
}
// GenericLister is a lister skin on a generic Indexer
type GenericLister interface {
// List will return all objects across namespaces
List(selector labels.Selector) (ret []runtime.Object, err error)
// Get will attempt to retrieve assuming that name==key
Get(name string) (runtime.Object, error)
// ByNamespace will give you a GenericNamespaceLister for one namespace
ByNamespace(namespace string) GenericNamespaceLister
}
// GenericNamespaceLister is a lister skin on a generic Indexer
type GenericNamespaceLister interface {
// List will return all objects in this namespace
List(selector labels.Selector) (ret []runtime.Object, err error)
// Get will attempt to retrieve by namespace and name
Get(name string) (runtime.Object, error)
}
func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
return &genericLister{indexer: indexer, resource: resource}
}
type genericLister struct {
indexer Indexer
resource schema.GroupResource
}
func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
err = ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(runtime.Object))
})
return ret, err
}
func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister {
return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource}
}
func (s *genericLister) Get(name string) (runtime.Object, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(s.resource, name)
}
return obj.(runtime.Object), nil
}
type genericNamespaceLister struct {
indexer Indexer
namespace string
resource schema.GroupResource
}
func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) {
err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(runtime.Object))
})
return ret, err
}
func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(s.resource, name)
}
return obj.(runtime.Object), nil
}
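// A minimal usage sketch for the generic lister (the resource and names are illustrative):
//
//	indexer := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
//	lister := NewGenericLister(indexer, schema.GroupResource{Group: "", Resource: "pods"})
//	all, _ := lister.List(labels.Everything())          // across all namespaces
//	one, err := lister.ByNamespace("ns1").Get("my-pod") // looked up under key "ns1/my-pod"
//	_ = one
//	if err != nil { /* a NotFound error when the key is absent */ }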


@ -1,104 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/pager"
)
// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
type ListerWatcher interface {
// List should return a list type object; the Items field will be extracted, and the
// ResourceVersion field will be used to start the watch in the right place.
List(options metav1.ListOptions) (runtime.Object, error)
// Watch should begin a watch at the specified version.
Watch(options metav1.ListOptions) (watch.Interface, error)
}
// ListFunc knows how to list resources
type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
// WatchFunc knows how to watch resources
type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
// It is a convenience function for users of NewReflector, etc.
// ListFunc and WatchFunc must not be nil
type ListWatch struct {
ListFunc ListFunc
WatchFunc WatchFunc
// DisableChunking requests no chunking for this list watcher.
DisableChunking bool
}
// Getter interface knows how to access Get method from RESTClient.
type Getter interface {
Get() *restclient.Request
}
// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
optionsModifier := func(options *metav1.ListOptions) {
options.FieldSelector = fieldSelector.String()
}
return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier)
}
// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier.
// The options modifier is a function that takes a ListOptions and modifies the consumed ListOptions. Provide a
// customized modifier function to apply modifications such as a field selector, a label selector, or any other
// desired options.
func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch {
listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
optionsModifier(&options)
return c.Get().
Namespace(namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec).
Do().
Get()
}
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
options.Watch = true
optionsModifier(&options)
return c.Get().
Namespace(namespace).
Resource(resource).
VersionedParams(&options, metav1.ParameterCodec).
Watch()
}
return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
// List a set of apiserver resources
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
if !lw.DisableChunking {
return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
}
return lw.ListFunc(options)
}
// Watch a set of apiserver resources
func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
return lw.WatchFunc(options)
}
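// A minimal sketch constructing a ListWatch for pods, assuming a typed clientset
// `cs` (kubernetes.Interface); its CoreV1().RESTClient() satisfies the Getter
// interface above:
//
//	lw := NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
//	list, _ := lw.List(metav1.ListOptions{}) // initial list, chunked unless DisableChunking is set
//	w, _ := lw.Watch(metav1.ListOptions{ResourceVersion: "0"})
//	defer w.Stop()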


@ -1,261 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"strconv"
"sync"
"time"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
utilcache "k8s.io/apimachinery/pkg/util/cache"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
)
// MutationCache is able to take the result of update operations and store them in an LRU
// that can be used to provide a more current view of a requested object. It requires interpreting
// resourceVersions for comparisons.
// Implementations must be thread-safe.
// TODO find a way to layer this into an informer/lister
type MutationCache interface {
GetByKey(key string) (interface{}, bool, error)
ByIndex(indexName, indexKey string) ([]interface{}, error)
Mutation(interface{})
}
type ResourceVersionComparator interface {
CompareResourceVersion(lhs, rhs runtime.Object) int
}
// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
// deal with objects that have a resource version that:
//
// - is an integer
// - increases when updated
// - is comparable across the same resource in a namespace
//
// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
// remains in the mutation cache before it is removed.
//
// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
// in the underlying store. This is only safe if your use of the cache can handle mutation entries
// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
return &mutationCache{
backingCache: backingCache,
indexer: indexer,
mutationCache: utilcache.NewLRUExpireCache(100),
comparator: etcdObjectVersioner{},
ttl: ttl,
includeAdds: includeAdds,
}
}
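// A minimal construction sketch (the ttl and includeAdds values are illustrative;
// updated is assumed to be an object returned by an Update call):
//
//	store := NewStore(MetaNamespaceKeyFunc)
//	mc := NewIntegerResourceVersionMutationCache(store, nil, time.Minute, false)
//	mc.Mutation(updated)                          // record a locally observed update
//	obj, exists, err := mc.GetByKey("ns1/my-pod") // newer of store copy vs. mutation entry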
// mutationCache doesn't guarantee that it returns values added via Mutation since they can page out. And
// since you can't distinguish between "didn't observe create" and "was deleted after create",
// if the key is missing from the backing cache, we always return it as missing.
type mutationCache struct {
lock sync.Mutex
backingCache Store
indexer Indexer
mutationCache *utilcache.LRUExpireCache
includeAdds bool
ttl time.Duration
comparator ResourceVersionComparator
}
// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could
// be older than another copy, the backingCache may be more recent, or you might have written twice into the same key.
// You get a value that was valid at some snapshot in time; GetByKey always returns the newer of the
// backingCache and mutationCache entries.
func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
c.lock.Lock()
defer c.lock.Unlock()
obj, exists, err := c.backingCache.GetByKey(key)
if err != nil {
return nil, false, err
}
if !exists {
if !c.includeAdds {
// we can't distinguish between, "didn't observe create" and "was deleted after create", so
// if the key is missing, we always return it as missing
return nil, false, nil
}
obj, exists = c.mutationCache.Get(key)
if !exists {
return nil, false, nil
}
}
objRuntime, ok := obj.(runtime.Object)
if !ok {
return obj, true, nil
}
return c.newerObject(key, objRuntime), true, nil
}
// ByIndex returns the newer objects that match the provided index and indexer key.
// Will return an error if no indexer was provided.
func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
c.lock.Lock()
defer c.lock.Unlock()
if c.indexer == nil {
return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
}
keys, err := c.indexer.IndexKeys(name, indexKey)
if err != nil {
return nil, err
}
var items []interface{}
keySet := sets.NewString()
for _, key := range keys {
keySet.Insert(key)
obj, exists, err := c.indexer.GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
continue
}
if objRuntime, ok := obj.(runtime.Object); ok {
items = append(items, c.newerObject(key, objRuntime))
} else {
items = append(items, obj)
}
}
if c.includeAdds {
fn := c.indexer.GetIndexers()[name]
// Keys() returns oldest to newest, so a full traversal does not alter the LRU behavior
for _, key := range c.mutationCache.Keys() {
updated, ok := c.mutationCache.Get(key)
if !ok {
continue
}
if keySet.Has(key.(string)) {
continue
}
elements, err := fn(updated)
if err != nil {
klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
continue
}
for _, inIndex := range elements {
if inIndex != indexKey {
continue
}
items = append(items, updated)
break
}
}
}
return items, nil
}
// newerObject checks the mutation cache for a newer object and returns one if found. If the
// mutated object is older than the backing object, it is removed from the mutation cache.
// Must be called while the lock is held.
func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
mutatedObj, exists := c.mutationCache.Get(key)
if !exists {
return backing
}
mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
if !ok {
return backing
}
if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
c.mutationCache.Remove(key)
return backing
}
return mutatedObjRuntime
}
// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to be
// "one that was valid at some point in time", not "the one that I want".
func (c *mutationCache) Mutation(obj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
key, err := DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
// this is a "nice to have", so failures shouldn't do anything weird
utilruntime.HandleError(err)
return
}
if objRuntime, ok := obj.(runtime.Object); ok {
if mutatedObj, exists := c.mutationCache.Get(key); exists {
if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok {
if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 {
return
}
}
}
}
c.mutationCache.Add(key, obj, c.ttl)
}
// etcdObjectVersioner implements versioning and extracting etcd node information
// for objects that have an embedded ObjectMeta or ListMeta field.
type etcdObjectVersioner struct{}
// ObjectResourceVersion implements Versioner
func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) {
accessor, err := meta.Accessor(obj)
if err != nil {
return 0, err
}
version := accessor.GetResourceVersion()
if len(version) == 0 {
return 0, nil
}
return strconv.ParseUint(version, 10, 64)
}
// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings,
// but etcd resource versions are special, they're actually ints, so we can easily compare them.
func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int {
lhsVersion, err := a.ObjectResourceVersion(lhs)
if err != nil {
// coder error
panic(err)
}
rhsVersion, err := a.ObjectResourceVersion(rhs)
if err != nil {
// coder error
panic(err)
}
if lhsVersion == rhsVersion {
return 0
}
if lhsVersion < rhsVersion {
return -1
}
return 1
}


@ -1,130 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"os"
"reflect"
"strconv"
"sync"
"time"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
)
var mutationDetectionEnabled = false
func init() {
mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
}
type CacheMutationDetector interface {
AddObject(obj interface{})
Run(stopCh <-chan struct{})
}
func NewCacheMutationDetector(name string) CacheMutationDetector {
if !mutationDetectionEnabled {
return dummyMutationDetector{}
}
klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
}
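// The detector is opt-in via an environment variable whose value is parsed with
// strconv.ParseBool; a typical way to enable it when running tests:
//
//	KUBE_CACHE_MUTATION_DETECTOR=true go test ./...
//
// When enabled, every object handed to AddObject is deep-copied and periodically
// compared against the cached original.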
type dummyMutationDetector struct{}
func (dummyMutationDetector) Run(stopCh <-chan struct{}) {
}
func (dummyMutationDetector) AddObject(obj interface{}) {
}
// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated
// It has a list of cached objects and their copies. I haven't thought of a way
// to see WHO is mutating it, just that it's getting mutated.
type defaultCacheMutationDetector struct {
name string
period time.Duration
lock sync.Mutex
cachedObjs []cacheObj
// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
// This panic is intentional, since turning on this detection indicates you want a strong
// failure signal. This failure is effectively a p0 bug and you can't trust process results
// after a mutation anyway.
failureFunc func(message string)
}
// cacheObj holds the actual object and a copy
type cacheObj struct {
cached interface{}
copied interface{}
}
func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
// we DON'T want protection from panics. If we're running this code, we want to die
for {
d.CompareObjects()
select {
case <-stopCh:
return
case <-time.After(d.period):
}
}
}
// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object
// but that covers the vast majority of our cached objects
func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
if _, ok := obj.(DeletedFinalStateUnknown); ok {
return
}
if obj, ok := obj.(runtime.Object); ok {
copiedObj := obj.DeepCopyObject()
d.lock.Lock()
defer d.lock.Unlock()
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
}
}
func (d *defaultCacheMutationDetector) CompareObjects() {
d.lock.Lock()
defer d.lock.Unlock()
altered := false
for i, obj := range d.cachedObjs {
if !reflect.DeepEqual(obj.cached, obj.copied) {
fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
altered = true
}
}
if altered {
msg := fmt.Sprintf("cache %s modified", d.name)
if d.failureFunc != nil {
d.failureFunc(msg)
return
}
panic(msg)
}
}


@ -1,81 +0,0 @@
// +build !race
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
)
func TestMutationDetector(t *testing.T) {
fakeWatch := watch.NewFake()
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fakeWatch, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{}, nil
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "anything",
Labels: map[string]string{"check": "foo"},
},
}
stopCh := make(chan struct{})
defer close(stopCh)
addReceived := make(chan bool)
mutationFound := make(chan bool)
informer := NewSharedInformer(lw, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
informer.cacheMutationDetector = &defaultCacheMutationDetector{
name: "name",
period: 1 * time.Second,
failureFunc: func(message string) {
mutationFound <- true
},
}
informer.AddEventHandler(
ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
addReceived <- true
},
},
)
go informer.Run(stopCh)
fakeWatch.Add(pod)
select {
case <-addReceived:
}
pod.Labels["change"] = "true"
select {
case <-mutationFound:
}
}


@ -1,58 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
concurrencyLevel = 5
)
func BenchmarkListener(b *testing.B) {
var notification addNotification
var swg sync.WaitGroup
swg.Add(b.N)
b.SetParallelism(concurrencyLevel)
// Preallocate enough space so that benchmark does not run out of it
pl := newProcessListener(&ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
swg.Done()
},
}, 0, 0, time.Now(), 1024*1024)
var wg wait.Group
defer wg.Wait() // Wait for .run and .pop to stop
defer close(pl.addCh) // Tell .run and .pop to stop
wg.Start(pl.run)
wg.Start(pl.pop)
b.ReportAllocs()
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
pl.add(notification)
}
})
swg.Wait() // Block until all notifications have been received
b.StopTimer()
}


@ -1,378 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"io"
"math/rand"
"net"
"net/url"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/naming"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog"
)
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
type Reflector struct {
// name identifies this reflector. By default it will be a file:line if possible.
name string
// metrics tracks basic metric information about the reflector
metrics *reflectorMetrics
// The type of object we expect to place in the store.
expectedType reflect.Type
// The destination to sync up with the watch source
store Store
// listerWatcher is used to perform lists and watches.
listerWatcher ListerWatcher
// period controls timing between one watch ending and
// the beginning of the next one.
period time.Duration
resyncPeriod time.Duration
ShouldResync func() bool
// clock allows tests to manipulate time
clock clock.Clock
// lastSyncResourceVersion is the resource version token last
// observed when doing a sync with the underlying store.
// It is thread safe, but not synchronized with the underlying store.
lastSyncResourceVersion string
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
lastSyncResourceVersionMutex sync.RWMutex
}
var (
// We try to spread the load on apiserver by setting timeouts for
// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
minWatchTimeout = 5 * time.Minute
)
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
// The indexer is configured to key on namespace
func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
return indexer, reflector
}
// NewReflector creates a new Reflector object which will keep the given store up to
// date with the server's contents for the given resource. Reflector promises to
// only put things in the store that have the type of expectedType, unless expectedType
// is nil. If resyncPeriod is non-zero, then lists will be executed after every
// resyncPeriod, so that you can use reflectors to periodically process everything as
// well as incrementally processing the things that change.
func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
}
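// A minimal usage sketch wiring a reflector to a store (the ListerWatcher here
// assumes a typed clientset `cs` as in listwatch.go; the resync period is illustrative):
//
//	store := NewStore(MetaNamespaceKeyFunc)
//	lw := NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
//	r := NewReflector(lw, &v1.Pod{}, store, 30*time.Second)
//	stopCh := make(chan struct{})
//	go r.Run(stopCh) // keeps store in sync until stopCh is closed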
// reflectorDisambiguator is used to disambiguate started reflectors.
// It is initialized to an unstable value to ensure meaning isn't attributed to the suffix.
var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
// NewNamedReflector same as NewReflector, but with a specified name for logging
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
r := &Reflector{
name: name,
// we need this to be unique per process (some names are still the same), but it should be obvious who it belongs to
metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
listerWatcher: lw,
store: store,
expectedType: reflect.TypeOf(expectedType),
period: time.Second,
resyncPeriod: resyncPeriod,
clock: &clock.RealClock{},
}
return r
}
func makeValidPrometheusMetricLabel(in string) string {
// this isn't perfect, but it removes our common characters
return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
}
// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
// call chains to NewReflector, so they'd be low entropy names for reflectors
var internalPackages = []string{"client-go/tools/cache/"}
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
// Run will exit when stopCh is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
wait.Until(func() {
if err := r.ListAndWatch(stopCh); err != nil {
utilruntime.HandleError(err)
}
}, r.period, stopCh)
}
var (
// nothing will ever be sent down this channel
neverExitWatch <-chan time.Time = make(chan time.Time)
// Used to indicate that watching stopped so that a resync could happen.
errorResyncRequested = errors.New("resync channel fired")
// Used to indicate that watching stopped because of a signal from the stop
// channel passed in from a client of the reflector.
errorStopRequested = errors.New("Stop requested")
)
// resyncChan returns a channel which will receive something when a resync is
// required, and a cleanup function.
func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
if r.resyncPeriod == 0 {
return neverExitWatch, func() bool { return false }
}
// The cleanup function is required: imagine the scenario where watches
// always fail so we end up listing frequently. Then, if we don't
// manually stop the timer, we could end up with many timers active
// concurrently.
t := r.clock.NewTimer(r.resyncPeriod)
return t.C(), t.Stop
}
// ListAndWatch first lists all items and gets the resource version at the moment of call,
// and then uses the resource version to watch.
// It returns an error if ListAndWatch didn't even try to initialize the watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
var resourceVersion string
// Explicitly set "0" as resource version - it's fine for the List()
// to be served from cache and potentially be delayed relative to
// etcd contents. Reflector framework will catch up via Watch() eventually.
options := metav1.ListOptions{ResourceVersion: "0"}
r.metrics.numberOfLists.Inc()
start := r.clock.Now()
list, err := r.listerWatcher.List(options)
if err != nil {
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
}
r.metrics.listDuration.Observe(time.Since(start).Seconds())
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
}
resourceVersion = listMetaInterface.GetResourceVersion()
items, err := meta.ExtractList(list)
if err != nil {
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
}
r.metrics.numberOfItemsInList.Observe(float64(len(items)))
if err := r.syncWith(items, resourceVersion); err != nil {
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
}
r.setLastSyncResourceVersion(resourceVersion)
resyncerrc := make(chan error, 1)
cancelCh := make(chan struct{})
defer close(cancelCh)
go func() {
resyncCh, cleanup := r.resyncChan()
defer func() {
cleanup() // Call the last one written into cleanup
}()
for {
select {
case <-resyncCh:
case <-stopCh:
return
case <-cancelCh:
return
}
if r.ShouldResync == nil || r.ShouldResync() {
klog.V(4).Infof("%s: forcing resync", r.name)
if err := r.store.Resync(); err != nil {
resyncerrc <- err
return
}
}
cleanup()
resyncCh, cleanup = r.resyncChan()
}
}()
for {
// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
select {
case <-stopCh:
return nil
default:
}
timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
options = metav1.ListOptions{
ResourceVersion: resourceVersion,
// We want to avoid situations of hanging watchers. Stop any watchers that do not
// receive any events within the timeout window.
TimeoutSeconds: &timeoutSeconds,
}
r.metrics.numberOfWatches.Inc()
w, err := r.listerWatcher.Watch(options)
if err != nil {
switch err {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
default:
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
}
// If this is "connection refused" error, it means that most likely apiserver is not responsive.
// It doesn't make sense to re-list all objects because most likely we will be able to restart
// watch where we ended.
// If that's the case wait and resend watch request.
if urlError, ok := err.(*url.Error); ok {
if opError, ok := urlError.Err.(*net.OpError); ok {
if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
time.Sleep(time.Second)
continue
}
}
}
return nil
}
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
if err != errorStopRequested {
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
}
return nil
}
}
}
// syncWith replaces the store's items with the given list.
func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error {
found := make([]interface{}, 0, len(items))
for _, item := range items {
found = append(found, item)
}
return r.store.Replace(found, resourceVersion)
}
// watchHandler watches w and keeps *resourceVersion up to date.
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
start := r.clock.Now()
eventCount := 0
// Stopping the watcher should be idempotent and if we return from this function there's no way
// we're coming back in with the same watch interface.
defer w.Stop()
// update metrics
defer func() {
r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
r.metrics.watchDuration.Observe(time.Since(start).Seconds())
}()
loop:
for {
select {
case <-stopCh:
return errorStopRequested
case err := <-errc:
return err
case event, ok := <-w.ResultChan():
if !ok {
break loop
}
if event.Type == watch.Error {
return apierrs.FromObject(event.Object)
}
if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
continue
}
meta, err := meta.Accessor(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
continue
}
newResourceVersion := meta.GetResourceVersion()
switch event.Type {
case watch.Added:
err := r.store.Add(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err))
}
case watch.Modified:
err := r.store.Update(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err))
}
case watch.Deleted:
// TODO: Will any consumers need access to the "last known
// state", which is passed in event.Object? If so, may need
// to change this.
err := r.store.Delete(event.Object)
if err != nil {
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
}
default:
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
}
*resourceVersion = newResourceVersion
r.setLastSyncResourceVersion(newResourceVersion)
eventCount++
}
}
watchDuration := r.clock.Now().Sub(start)
if watchDuration < 1*time.Second && eventCount == 0 {
r.metrics.numberOfShortWatches.Inc()
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
}
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
return nil
}
// LastSyncResourceVersion is the resource version observed when last synced with the underlying store.
// The value returned is not synchronized with access to the underlying store and is not thread-safe
func (r *Reflector) LastSyncResourceVersion() string {
r.lastSyncResourceVersionMutex.RLock()
defer r.lastSyncResourceVersionMutex.RUnlock()
return r.lastSyncResourceVersion
}
func (r *Reflector) setLastSyncResourceVersion(v string) {
r.lastSyncResourceVersionMutex.Lock()
defer r.lastSyncResourceVersionMutex.Unlock()
r.lastSyncResourceVersion = v
rv, err := strconv.Atoi(v)
if err == nil {
r.metrics.lastResourceVersion.Set(float64(rv))
}
}


@ -1,119 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
package cache
import (
"sync"
)
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
Set(float64)
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
Inc()
}
// SummaryMetric captures individual observations.
type SummaryMetric interface {
Observe(float64)
}
type noopMetric struct{}
func (noopMetric) Inc() {}
func (noopMetric) Dec() {}
func (noopMetric) Observe(float64) {}
func (noopMetric) Set(float64) {}
type reflectorMetrics struct {
numberOfLists CounterMetric
listDuration SummaryMetric
numberOfItemsInList SummaryMetric
numberOfWatches CounterMetric
numberOfShortWatches CounterMetric
watchDuration SummaryMetric
numberOfItemsInWatch SummaryMetric
lastResourceVersion GaugeMetric
}
// MetricsProvider generates various metrics used by the reflector.
type MetricsProvider interface {
NewListsMetric(name string) CounterMetric
NewListDurationMetric(name string) SummaryMetric
NewItemsInListMetric(name string) SummaryMetric
NewWatchesMetric(name string) CounterMetric
NewShortWatchesMetric(name string) CounterMetric
NewWatchDurationMetric(name string) SummaryMetric
NewItemsInWatchMetric(name string) SummaryMetric
NewLastResourceVersionMetric(name string) GaugeMetric
}
type noopMetricsProvider struct{}
func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} }
func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} }
func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} }
func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} }
func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric {
return noopMetric{}
}
var metricsFactory = struct {
metricsProvider MetricsProvider
setProviders sync.Once
}{
metricsProvider: noopMetricsProvider{},
}
func newReflectorMetrics(name string) *reflectorMetrics {
var ret *reflectorMetrics
if len(name) == 0 {
return ret
}
return &reflectorMetrics{
numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name),
listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name),
numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name),
numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name),
numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name),
numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
}
}
// SetReflectorMetricsProvider sets the metrics provider
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
metricsFactory.setProviders.Do(func() {
metricsFactory.metricsProvider = metricsProvider
})
}
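// A minimal sketch of a custom provider: embed noopMetricsProvider and override
// only the metrics you care about (listCounter and myProvider are hypothetical):
//
//	type listCounter struct{ n int64 }
//	func (c *listCounter) Inc() { atomic.AddInt64(&c.n, 1) }
//
//	type myProvider struct{ noopMetricsProvider }
//	func (myProvider) NewListsMetric(name string) CounterMetric { return &listCounter{} }
//
//	SetReflectorMetricsProvider(myProvider{}) // takes effect once, guarded by sync.Once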


@ -1,389 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"math/rand"
"strconv"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
)
var nevererrc chan error
type testLW struct {
ListFunc func(options metav1.ListOptions) (runtime.Object, error)
WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
}
func (t *testLW) List(options metav1.ListOptions) (runtime.Object, error) {
return t.ListFunc(options)
}
func (t *testLW) Watch(options metav1.ListOptions) (watch.Interface, error) {
return t.WatchFunc(options)
}
func TestCloseWatchChannelOnError(t *testing.T) {
r := NewReflector(&testLW{}, &v1.Pod{}, NewStore(MetaNamespaceKeyFunc), 0)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}
fw := watch.NewFake()
r.listerWatcher = &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
},
}
go r.ListAndWatch(wait.NeverStop)
fw.Error(pod)
select {
case _, ok := <-fw.ResultChan():
if ok {
t.Errorf("Watch channel left open after cancellation")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("the cancellation is at least %s late", wait.ForeverTestTimeout.String())
break
}
}
func TestRunUntil(t *testing.T) {
stopCh := make(chan struct{})
store := NewStore(MetaNamespaceKeyFunc)
r := NewReflector(&testLW{}, &v1.Pod{}, store, 0)
fw := watch.NewFake()
r.listerWatcher = &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
},
}
go r.Run(stopCh)
// Synchronously add a dummy pod into the watch channel so we
// know the RunUntil goroutine is in the watch handler.
fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}})
close(stopCh)
select {
case _, ok := <-fw.ResultChan():
if ok {
t.Errorf("Watch channel left open after stopping the watch")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("the cancellation is at least %s late", wait.ForeverTestTimeout.String())
break
}
}
func TestReflectorResyncChan(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, time.Millisecond)
a, _ := g.resyncChan()
b := time.After(wait.ForeverTestTimeout)
select {
case <-a:
t.Logf("got timeout as expected")
case <-b:
t.Errorf("resyncChan() is at least 99 milliseconds late??")
}
}
func BenchmarkReflectorResyncChanMany(b *testing.B) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 25*time.Millisecond)
// The improvement to this (calling the timer's Stop() method) makes
// this benchmark about 40% faster.
for i := 0; i < b.N; i++ {
g.resyncPeriod = time.Duration(rand.Float64() * float64(time.Millisecond) * 25)
_, stop := g.resyncChan()
stop()
}
}
func TestReflectorWatchHandlerError(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 0)
fw := watch.NewFake()
go func() {
fw.Stop()
}()
var resumeRV string
err := g.watchHandler(fw, &resumeRV, nevererrc, wait.NeverStop)
if err == nil {
t.Errorf("unexpected non-error")
}
}
func TestReflectorWatchHandler(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 0)
fw := watch.NewFake()
s.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
s.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}})
go func() {
fw.Add(&v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "rejected"}})
fw.Delete(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})
fw.Modify(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar", ResourceVersion: "55"}})
fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "baz", ResourceVersion: "32"}})
fw.Stop()
}()
var resumeRV string
err := g.watchHandler(fw, &resumeRV, nevererrc, wait.NeverStop)
if err != nil {
t.Errorf("unexpected error %v", err)
}
mkPod := func(id string, rv string) *v1.Pod {
return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: id, ResourceVersion: rv}}
}
table := []struct {
Pod *v1.Pod
exists bool
}{
{mkPod("foo", ""), false},
{mkPod("rejected", ""), false},
{mkPod("bar", "55"), true},
{mkPod("baz", "32"), true},
}
for _, item := range table {
obj, exists, _ := s.Get(item.Pod)
if e, a := item.exists, exists; e != a {
t.Errorf("%v: expected %v, got %v", item.Pod, e, a)
}
if !exists {
continue
}
if e, a := item.Pod.ResourceVersion, obj.(*v1.Pod).ResourceVersion; e != a {
t.Errorf("%v: expected %v, got %v", item.Pod, e, a)
}
}
// resumeRV should be the last resource version we saw.
if e, a := "32", resumeRV; e != a {
t.Errorf("expected %v, got %v", e, a)
}
// last sync resource version should be the last version synced with store
if e, a := "32", g.LastSyncResourceVersion(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
func TestReflectorStopWatch(t *testing.T) {
s := NewStore(MetaNamespaceKeyFunc)
g := NewReflector(&testLW{}, &v1.Pod{}, s, 0)
fw := watch.NewFake()
var resumeRV string
stopWatch := make(chan struct{}, 1)
stopWatch <- struct{}{}
err := g.watchHandler(fw, &resumeRV, nevererrc, stopWatch)
if err != errorStopRequested {
t.Errorf("expected stop error, got %q", err)
}
}
func TestReflectorListAndWatch(t *testing.T) {
createdFakes := make(chan *watch.FakeWatcher)
// The ListFunc says that it's at revision 1. Therefore, we expect our WatchFunc
// to get called at the beginning of the watch with 1, and again with 3 when we
// inject an error.
expectedRVs := []string{"1", "3"}
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
rv := options.ResourceVersion
fw := watch.NewFake()
if e, a := expectedRVs[0], rv; e != a {
t.Errorf("Expected rv %v, but got %v", e, a)
}
expectedRVs = expectedRVs[1:]
// channel is not buffered because the for loop below needs to block. But
// we don't want to block here, so report the new fake via a goroutine.
go func() { createdFakes <- fw }()
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
},
}
s := NewFIFO(MetaNamespaceKeyFunc)
r := NewReflector(lw, &v1.Pod{}, s, 0)
go r.ListAndWatch(wait.NeverStop)
ids := []string{"foo", "bar", "baz", "qux", "zoo"}
var fw *watch.FakeWatcher
for i, id := range ids {
if fw == nil {
fw = <-createdFakes
}
sendingRV := strconv.FormatUint(uint64(i+2), 10)
fw.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: id, ResourceVersion: sendingRV}})
if sendingRV == "3" {
// Inject a failure.
fw.Stop()
fw = nil
}
}
// Verify we received the right ids with the right resource versions.
for i, id := range ids {
pod := Pop(s).(*v1.Pod)
if e, a := id, pod.Name; e != a {
t.Errorf("%v: Expected %v, got %v", i, e, a)
}
if e, a := strconv.FormatUint(uint64(i+2), 10), pod.ResourceVersion; e != a {
t.Errorf("%v: Expected %v, got %v", i, e, a)
}
}
if len(expectedRVs) != 0 {
t.Error("called watchStarter an unexpected number of times")
}
}
func TestReflectorListAndWatchWithErrors(t *testing.T) {
mkPod := func(id string, rv string) *v1.Pod {
return &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: id, ResourceVersion: rv}}
}
mkList := func(rv string, pods ...*v1.Pod) *v1.PodList {
list := &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: rv}}
for _, pod := range pods {
list.Items = append(list.Items, *pod)
}
return list
}
table := []struct {
list *v1.PodList
listErr error
events []watch.Event
watchErr error
}{
{
list: mkList("1"),
events: []watch.Event{
{Type: watch.Added, Object: mkPod("foo", "2")},
{Type: watch.Added, Object: mkPod("bar", "3")},
},
}, {
list: mkList("3", mkPod("foo", "2"), mkPod("bar", "3")),
events: []watch.Event{
{Type: watch.Deleted, Object: mkPod("foo", "4")},
{Type: watch.Added, Object: mkPod("qux", "5")},
},
}, {
listErr: fmt.Errorf("a list error"),
}, {
list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")),
watchErr: fmt.Errorf("a watch error"),
}, {
list: mkList("5", mkPod("bar", "3"), mkPod("qux", "5")),
events: []watch.Event{
{Type: watch.Added, Object: mkPod("baz", "6")},
},
}, {
list: mkList("6", mkPod("bar", "3"), mkPod("qux", "5"), mkPod("baz", "6")),
},
}
s := NewFIFO(MetaNamespaceKeyFunc)
for line, item := range table {
if item.list != nil {
// Test that the list is what currently exists in the store.
current := s.List()
checkMap := map[string]string{}
for _, item := range current {
pod := item.(*v1.Pod)
checkMap[pod.Name] = pod.ResourceVersion
}
for _, pod := range item.list.Items {
if e, a := pod.ResourceVersion, checkMap[pod.Name]; e != a {
t.Errorf("%v: expected %v, got %v for pod %v", line, e, a, pod.Name)
}
}
if e, a := len(item.list.Items), len(checkMap); e != a {
t.Errorf("%v: expected %v, got %v", line, e, a)
}
}
watchRet, watchErr := item.events, item.watchErr
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if watchErr != nil {
return nil, watchErr
}
watchErr = fmt.Errorf("second watch")
fw := watch.NewFake()
go func() {
for _, e := range watchRet {
fw.Action(e.Type, e.Object)
}
fw.Stop()
}()
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return item.list, item.listErr
},
}
r := NewReflector(lw, &v1.Pod{}, s, 0)
r.ListAndWatch(wait.NeverStop)
}
}
func TestReflectorResync(t *testing.T) {
iteration := 0
stopCh := make(chan struct{})
rerr := errors.New("expected resync reached")
s := &FakeCustomStore{
ResyncFunc: func() error {
iteration++
if iteration == 2 {
return rerr
}
return nil
},
}
lw := &testLW{
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
fw := watch.NewFake()
return fw, nil
},
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "0"}}, nil
},
}
resyncPeriod := 1 * time.Millisecond
r := NewReflector(lw, &v1.Pod{}, s, resyncPeriod)
if err := r.ListAndWatch(stopCh); err != nil {
// error from Resync is not propagated up to here.
t.Errorf("unexpected error %v", err)
}
if iteration != 2 {
t.Errorf("exactly 2 iterations were expected, got: %v", iteration)
}
}


@ -1,597 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/buffer"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
)
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
// one behavior change compared to a standard Informer. When you receive a notification, the cache
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
// on the contents of the cache exactly matching the notification you've received in handler
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
// has advantages over the broadcaster since it allows us to share a common cache across many
// controllers. Extending the broadcaster would have required us to keep duplicate caches for each
// watch.
type SharedInformer interface {
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
// period. Events to a single handler are delivered sequentially, but there is no coordination
// between different handlers.
AddEventHandler(handler ResourceEventHandler)
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
// specified resync period. Events to a single handler are delivered sequentially, but there is
// no coordination between different handlers.
AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
// GetStore returns the Store.
GetStore() Store
// GetController gives back a synthetic interface that "votes" to start the informer
GetController() Controller
// Run starts the shared informer, which will be stopped when stopCh is closed.
Run(stopCh <-chan struct{})
// HasSynced returns true if the shared informer's store has synced.
HasSynced() bool
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
// store. The value returned is not synchronized with access to the underlying store and is not
// thread-safe.
LastSyncResourceVersion() string
}
type SharedIndexInformer interface {
SharedInformer
// AddIndexers add indexers to the informer before it starts.
AddIndexers(indexers Indexers) error
GetIndexer() Indexer
}
// NewSharedInformer creates a new instance for the listwatcher.
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}
// NewSharedIndexInformer creates a new instance for the listwatcher.
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
realClock := &clock.RealClock{}
sharedIndexInformer := &sharedIndexInformer{
processor: &sharedProcessor{clock: realClock},
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
listerWatcher: lw,
objectType: objType,
resyncCheckPeriod: defaultEventHandlerResyncPeriod,
defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
clock: realClock,
}
return sharedIndexInformer
}
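// A minimal usage sketch (assumes `lw` built as in listwatch.go; the resync period
// is illustrative):
//
//	informer := NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
//	informer.AddEventHandler(ResourceEventHandlerFuncs{
//		AddFunc: func(obj interface{}) { /* react to adds */ },
//	})
//	stopCh := make(chan struct{})
//	go informer.Run(stopCh)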
// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool
const (
// syncedPollPeriod controls how often you look at the status of your sync funcs
syncedPollPeriod = 100 * time.Millisecond
// initialBufferSize is the initial number of event notifications that can be buffered.
initialBufferSize = 1024
)
// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shut down.
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
err := wait.PollUntil(syncedPollPeriod,
func() (bool, error) {
for _, syncFunc := range cacheSyncs {
if !syncFunc() {
return false, nil
}
}
return true, nil
},
stopCh)
if err != nil {
klog.V(2).Infof("stop requested")
return false
}
klog.V(4).Infof("caches populated")
return true
}
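// A hedged startup sketch: a controller typically passes the HasSynced method
// of every informer it depends on (informer1 and informer2 are illustrative):
//
//	if !WaitForCacheSync(stopCh, informer1.HasSynced, informer2.HasSynced) {
//		utilruntime.HandleError(fmt.Errorf("unable to sync caches before shutdown"))
//		return
//	}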
type sharedIndexInformer struct {
indexer Indexer
controller Controller
processor *sharedProcessor
cacheMutationDetector CacheMutationDetector
// This block is tracked to handle late initialization of the controller
listerWatcher ListerWatcher
objectType runtime.Object
// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
// shouldResync to check if any of our listeners need a resync.
resyncCheckPeriod time.Duration
// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
// value).
defaultEventHandlerResyncPeriod time.Duration
// clock allows for testability
clock clock.Clock
started, stopped bool
startedLock sync.Mutex
// blockDeltas gives a way to stop all event distribution so that a late event handler
// can safely join the shared informer.
blockDeltas sync.Mutex
}
// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
informer *sharedIndexInformer
}
func (v *dummyController) Run(stopCh <-chan struct{}) {
}
func (v *dummyController) HasSynced() bool {
return v.informer.HasSynced()
}
func (v *dummyController) LastSyncResourceVersion() string {
return ""
}
type updateNotification struct {
oldObj interface{}
newObj interface{}
}
type addNotification struct {
newObj interface{}
}
type deleteNotification struct {
oldObj interface{}
}
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)
cfg := &Config{
Queue: fifo,
ListerWatcher: s.listerWatcher,
ObjectType: s.objectType,
FullResyncPeriod: s.resyncCheckPeriod,
RetryOnError: false,
ShouldResync: s.processor.shouldResync,
Process: s.HandleDeltas,
}
func() {
s.startedLock.Lock()
defer s.startedLock.Unlock()
s.controller = New(cfg)
s.controller.(*controller).clock = s.clock
s.started = true
}()
// Separate stop channel because Processor should be stopped strictly after controller
processorStopCh := make(chan struct{})
var wg wait.Group
defer wg.Wait() // Wait for Processor to stop
defer close(processorStopCh) // Tell Processor to stop
wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
wg.StartWithChannel(processorStopCh, s.processor.run)
defer func() {
s.startedLock.Lock()
defer s.startedLock.Unlock()
s.stopped = true // Don't want any new listeners
}()
s.controller.Run(stopCh)
}
func (s *sharedIndexInformer) HasSynced() bool {
s.startedLock.Lock()
defer s.startedLock.Unlock()
if s.controller == nil {
return false
}
return s.controller.HasSynced()
}
func (s *sharedIndexInformer) LastSyncResourceVersion() string {
s.startedLock.Lock()
defer s.startedLock.Unlock()
if s.controller == nil {
return ""
}
return s.controller.LastSyncResourceVersion()
}
func (s *sharedIndexInformer) GetStore() Store {
return s.indexer
}
func (s *sharedIndexInformer) GetIndexer() Indexer {
return s.indexer
}
func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
s.startedLock.Lock()
defer s.startedLock.Unlock()
if s.started {
return fmt.Errorf("informer has already started")
}
return s.indexer.AddIndexers(indexers)
}
func (s *sharedIndexInformer) GetController() Controller {
return &dummyController{informer: s}
}
func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}
func determineResyncPeriod(desired, check time.Duration) time.Duration {
if desired == 0 {
return desired
}
if check == 0 {
klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
return 0
}
if desired < check {
klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
return check
}
return desired
}
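// Behavior sketch for determineResyncPeriod (durations are illustrative):
//
//	determineResyncPeriod(0, 30*time.Second)              // 0: this handler never resyncs
//	determineResyncPeriod(10*time.Second, 0)              // 0, with a warning: resync unsupported
//	determineResyncPeriod(10*time.Second, 30*time.Second) // 30s: raised to the check period
//	determineResyncPeriod(time.Minute, 30*time.Second)    // 1m: returned unchanged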
const minimumResyncPeriod = 1 * time.Second
func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
s.startedLock.Lock()
defer s.startedLock.Unlock()
if s.stopped {
klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
return
}
if resyncPeriod > 0 {
if resyncPeriod < minimumResyncPeriod {
klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
resyncPeriod = minimumResyncPeriod
}
if resyncPeriod < s.resyncCheckPeriod {
if s.started {
klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
resyncPeriod = s.resyncCheckPeriod
} else {
// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
// accordingly
s.resyncCheckPeriod = resyncPeriod
s.processor.resyncCheckPeriodChanged(resyncPeriod)
}
}
}
listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
if !s.started {
s.processor.addListener(listener)
return
}
// in order to safely join, we have to
// 1. stop sending add/update/delete notifications
// 2. do a list against the store
// 3. send synthetic "Add" events to the new handler
// 4. unblock
s.blockDeltas.Lock()
defer s.blockDeltas.Unlock()
s.processor.addListener(listener)
for _, item := range s.indexer.List() {
listener.add(addNotification{newObj: item})
}
}
func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
s.blockDeltas.Lock()
defer s.blockDeltas.Unlock()
// from oldest to newest
for _, d := range obj.(Deltas) {
switch d.Type {
case Sync, Added, Updated:
isSync := d.Type == Sync
s.cacheMutationDetector.AddObject(d.Object)
if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
if err := s.indexer.Update(d.Object); err != nil {
return err
}
s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
} else {
if err := s.indexer.Add(d.Object); err != nil {
return err
}
s.processor.distribute(addNotification{newObj: d.Object}, isSync)
}
case Deleted:
if err := s.indexer.Delete(d.Object); err != nil {
return err
}
s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
}
}
return nil
}
type sharedProcessor struct {
listenersStarted bool
listenersLock sync.RWMutex
listeners []*processorListener
syncingListeners []*processorListener
clock clock.Clock
wg wait.Group
}
func (p *sharedProcessor) addListener(listener *processorListener) {
p.listenersLock.Lock()
defer p.listenersLock.Unlock()
p.addListenerLocked(listener)
if p.listenersStarted {
p.wg.Start(listener.run)
p.wg.Start(listener.pop)
}
}
func (p *sharedProcessor) addListenerLocked(listener *processorListener) {
p.listeners = append(p.listeners, listener)
p.syncingListeners = append(p.syncingListeners, listener)
}
func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
p.listenersLock.RLock()
defer p.listenersLock.RUnlock()
if sync {
for _, listener := range p.syncingListeners {
listener.add(obj)
}
} else {
for _, listener := range p.listeners {
listener.add(obj)
}
}
}
func (p *sharedProcessor) run(stopCh <-chan struct{}) {
func() {
p.listenersLock.RLock()
defer p.listenersLock.RUnlock()
for _, listener := range p.listeners {
p.wg.Start(listener.run)
p.wg.Start(listener.pop)
}
p.listenersStarted = true
}()
<-stopCh
p.listenersLock.RLock()
defer p.listenersLock.RUnlock()
for _, listener := range p.listeners {
close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
}
p.wg.Wait() // Wait for all .pop() and .run() to stop
}
// shouldResync queries every listener to determine if any of them need a resync, based on each
// listener's resyncPeriod.
func (p *sharedProcessor) shouldResync() bool {
p.listenersLock.Lock()
defer p.listenersLock.Unlock()
p.syncingListeners = []*processorListener{}
resyncNeeded := false
now := p.clock.Now()
for _, listener := range p.listeners {
// need to loop through all the listeners to see if they need to resync so we can prepare any
// listeners that are going to be resyncing.
if listener.shouldResync(now) {
resyncNeeded = true
p.syncingListeners = append(p.syncingListeners, listener)
listener.determineNextResync(now)
}
}
return resyncNeeded
}
func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
p.listenersLock.RLock()
defer p.listenersLock.RUnlock()
for _, listener := range p.listeners {
resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod)
listener.setResyncPeriod(resyncPeriod)
}
}
type processorListener struct {
nextCh chan interface{}
addCh chan interface{}
handler ResourceEventHandler
// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
// added until we OOM.
// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
// we should try to do something better.
pendingNotifications buffer.RingGrowing
// requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
requestedResyncPeriod time.Duration
// resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
// value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
// informer's overall resync check period.
resyncPeriod time.Duration
// nextResync is the earliest time the listener should get a full resync
nextResync time.Time
// resyncLock guards access to resyncPeriod and nextResync
resyncLock sync.Mutex
}
func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
ret := &processorListener{
nextCh: make(chan interface{}),
addCh: make(chan interface{}),
handler: handler,
pendingNotifications: *buffer.NewRingGrowing(bufferSize),
requestedResyncPeriod: requestedResyncPeriod,
resyncPeriod: resyncPeriod,
}
ret.determineNextResync(now)
return ret
}
func (p *processorListener) add(notification interface{}) {
p.addCh <- notification
}
func (p *processorListener) pop() {
defer utilruntime.HandleCrash()
defer close(p.nextCh) // Tell .run() to stop
var nextCh chan<- interface{}
var notification interface{}
for {
select {
case nextCh <- notification:
// Notification dispatched
var ok bool
notification, ok = p.pendingNotifications.ReadOne()
if !ok { // Nothing to pop
nextCh = nil // Disable this select case
}
case notificationToAdd, ok := <-p.addCh:
if !ok {
return
}
if notification == nil { // No notification to pop (and pendingNotifications is empty)
// Optimize the case - skip adding to pendingNotifications
notification = notificationToAdd
nextCh = p.nextCh
} else { // There is already a notification waiting to be dispatched
p.pendingNotifications.WriteOne(notificationToAdd)
}
}
}
}
func (p *processorListener) run() {
// this call blocks until the channel is closed. When a panic happens during the notification
// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
// the next notification will be attempted. This is usually better than the alternative of never
// delivering again.
stopCh := make(chan struct{})
wait.Until(func() {
// this gives us a few quick retries before a long pause and then a few more quick retries
err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
for next := range p.nextCh {
switch notification := next.(type) {
case updateNotification:
p.handler.OnUpdate(notification.oldObj, notification.newObj)
case addNotification:
p.handler.OnAdd(notification.newObj)
case deleteNotification:
p.handler.OnDelete(notification.oldObj)
default:
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
}
}
// the only way to get here is if the p.nextCh is empty and closed
return true, nil
})
// the only way to get here is if the p.nextCh is empty and closed
if err == nil {
close(stopCh)
}
}, 1*time.Minute, stopCh)
}
// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
// this always returns false.
func (p *processorListener) shouldResync(now time.Time) bool {
p.resyncLock.Lock()
defer p.resyncLock.Unlock()
if p.resyncPeriod == 0 {
return false
}
return now.After(p.nextResync) || now.Equal(p.nextResync)
}
func (p *processorListener) determineNextResync(now time.Time) {
p.resyncLock.Lock()
defer p.resyncLock.Unlock()
p.nextResync = now.Add(p.resyncPeriod)
}
func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
p.resyncLock.Lock()
defer p.resyncLock.Unlock()
p.resyncPeriod = resyncPeriod
}

View File

@ -1,265 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
fcache "k8s.io/client-go/tools/cache/testing"
)
type testListener struct {
lock sync.RWMutex
resyncPeriod time.Duration
expectedItemNames sets.String
receivedItemNames []string
name string
}
func newTestListener(name string, resyncPeriod time.Duration, expected ...string) *testListener {
l := &testListener{
resyncPeriod: resyncPeriod,
expectedItemNames: sets.NewString(expected...),
name: name,
}
return l
}
func (l *testListener) OnAdd(obj interface{}) {
l.handle(obj)
}
func (l *testListener) OnUpdate(old, new interface{}) {
l.handle(new)
}
func (l *testListener) OnDelete(obj interface{}) {
}
func (l *testListener) handle(obj interface{}) {
key, _ := MetaNamespaceKeyFunc(obj)
fmt.Printf("%s: handle: %v\n", l.name, key)
l.lock.Lock()
defer l.lock.Unlock()
objectMeta, _ := meta.Accessor(obj)
l.receivedItemNames = append(l.receivedItemNames, objectMeta.GetName())
}
func (l *testListener) ok() bool {
fmt.Println("polling")
err := wait.PollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
if l.satisfiedExpectations() {
return true, nil
}
return false, nil
})
if err != nil {
return false
}
// wait just a bit to allow any unexpected stragglers to come in
fmt.Println("sleeping")
time.Sleep(1 * time.Second)
fmt.Println("final check")
return l.satisfiedExpectations()
}
func (l *testListener) satisfiedExpectations() bool {
l.lock.RLock()
defer l.lock.RUnlock()
return len(l.receivedItemNames) == l.expectedItemNames.Len() && sets.NewString(l.receivedItemNames...).Equal(l.expectedItemNames)
}
func TestListenerResyncPeriods(t *testing.T) {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}})
source.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}})
// create the shared informer and resync every 1s
informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
clock := clock.NewFakeClock(time.Now())
informer.clock = clock
informer.processor.clock = clock
// listener 1, never resync
listener1 := newTestListener("listener1", 0, "pod1", "pod2")
informer.AddEventHandlerWithResyncPeriod(listener1, listener1.resyncPeriod)
// listener 2, resync every 2s
listener2 := newTestListener("listener2", 2*time.Second, "pod1", "pod2")
informer.AddEventHandlerWithResyncPeriod(listener2, listener2.resyncPeriod)
// listener 3, resync every 3s
listener3 := newTestListener("listener3", 3*time.Second, "pod1", "pod2")
informer.AddEventHandlerWithResyncPeriod(listener3, listener3.resyncPeriod)
listeners := []*testListener{listener1, listener2, listener3}
stop := make(chan struct{})
defer close(stop)
go informer.Run(stop)
// ensure all listeners got the initial List
for _, listener := range listeners {
if !listener.ok() {
t.Errorf("%s: expected %v, got %v", listener.name, listener.expectedItemNames, listener.receivedItemNames)
}
}
// reset
for _, listener := range listeners {
listener.receivedItemNames = []string{}
}
// advance so listener2 gets a resync
clock.Step(2 * time.Second)
// make sure listener2 got the resync
if !listener2.ok() {
t.Errorf("%s: expected %v, got %v", listener2.name, listener2.expectedItemNames, listener2.receivedItemNames)
}
// wait a bit to give errant items a chance to go to 1 and 3
time.Sleep(1 * time.Second)
// make sure listeners 1 and 3 got nothing
if len(listener1.receivedItemNames) != 0 {
t.Errorf("listener1: should not have resynced (got %d)", len(listener1.receivedItemNames))
}
if len(listener3.receivedItemNames) != 0 {
t.Errorf("listener3: should not have resynced (got %d)", len(listener3.receivedItemNames))
}
// reset
for _, listener := range listeners {
listener.receivedItemNames = []string{}
}
// advance so listener3 gets a resync
clock.Step(1 * time.Second)
// make sure listener3 got the resync
if !listener3.ok() {
t.Errorf("%s: expected %v, got %v", listener3.name, listener3.expectedItemNames, listener3.receivedItemNames)
}
// wait a bit to give errant items a chance to go to 1 and 2
time.Sleep(1 * time.Second)
// make sure listeners 1 and 2 got nothing
if len(listener1.receivedItemNames) != 0 {
t.Errorf("listener1: should not have resynced (got %d)", len(listener1.receivedItemNames))
}
if len(listener2.receivedItemNames) != 0 {
t.Errorf("listener2: should not have resynced (got %d)", len(listener2.receivedItemNames))
}
}
func TestResyncCheckPeriod(t *testing.T) {
// source simulates an apiserver object endpoint.
source := fcache.NewFakeControllerSource()
// create the shared informer and resync every 12 hours
informer := NewSharedInformer(source, &v1.Pod{}, 12*time.Hour).(*sharedIndexInformer)
clock := clock.NewFakeClock(time.Now())
informer.clock = clock
informer.processor.clock = clock
// listener 1, never resync
listener1 := newTestListener("listener1", 0)
informer.AddEventHandlerWithResyncPeriod(listener1, listener1.resyncPeriod)
if e, a := 12*time.Hour, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
// listener 2, resync every minute
listener2 := newTestListener("listener2", 1*time.Minute)
informer.AddEventHandlerWithResyncPeriod(listener2, listener2.resyncPeriod)
if e, a := 1*time.Minute, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 1*time.Minute, informer.processor.listeners[1].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
// listener 3, resync every 55 seconds
listener3 := newTestListener("listener3", 55*time.Second)
informer.AddEventHandlerWithResyncPeriod(listener3, listener3.resyncPeriod)
if e, a := 55*time.Second, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 1*time.Minute, informer.processor.listeners[1].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 55*time.Second, informer.processor.listeners[2].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
// listener 4, resync every 5 seconds
listener4 := newTestListener("listener4", 5*time.Second)
informer.AddEventHandlerWithResyncPeriod(listener4, listener4.resyncPeriod)
if e, a := 5*time.Second, informer.resyncCheckPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := time.Duration(0), informer.processor.listeners[0].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 1*time.Minute, informer.processor.listeners[1].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 55*time.Second, informer.processor.listeners[2].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
if e, a := 5*time.Second, informer.processor.listeners[3].resyncPeriod; e != a {
t.Errorf("expected %d, got %d", e, a)
}
}
// verify that https://github.com/kubernetes/kubernetes/issues/59822 is fixed
func TestSharedInformerInitializationRace(t *testing.T) {
source := fcache.NewFakeControllerSource()
informer := NewSharedInformer(source, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)
listener := newTestListener("raceListener", 0)
stop := make(chan struct{})
go informer.AddEventHandlerWithResyncPeriod(listener, listener.resyncPeriod)
go informer.Run(stop)
close(stop)
}

View File

@ -1,244 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
)
// Store is a generic object storage interface. Reflector knows how to watch a server
// and update a store. A generic store is provided, which allows Reflector to be used
// as a local caching system, and an LRU store, which allows Reflector to work like a
// queue of items yet to be processed.
//
// Store makes no assumptions about stored object identity; it is the responsibility
// of a Store implementation to provide a mechanism to correctly key objects and to
// define the contract for obtaining objects by some arbitrary key type.
type Store interface {
Add(obj interface{}) error
Update(obj interface{}) error
Delete(obj interface{}) error
List() []interface{}
ListKeys() []string
Get(obj interface{}) (item interface{}, exists bool, err error)
GetByKey(key string) (item interface{}, exists bool, err error)
// Replace will delete the contents of the store, using instead the
// given list. Store takes ownership of the list; you should not reference
// it after calling this function.
Replace([]interface{}, string) error
Resync() error
}
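// A hedged usage sketch: a Store built with NewStore (defined later in this
// file), keyed by MetaNamespaceKeyFunc; pod is an assumed namespaced object.
//
//	store := NewStore(MetaNamespaceKeyFunc)
//	_ = store.Add(pod)
//	obj, exists, err := store.GetByKey("default/mypod")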
// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
type KeyFunc func(obj interface{}) (string, error)
// KeyError will be returned any time a KeyFunc gives an error; it includes the object
// at fault.
type KeyError struct {
Obj interface{}
Err error
}
// Error gives a human-readable description of the error.
func (k KeyError) Error() string {
return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
}
// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
// the object but not the object itself.
type ExplicitKey string
// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
// keys for API objects which implement meta.Interface.
// The key uses the format <namespace>/<name> unless <namespace> is empty, then
// it's just <name>.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
if key, ok := obj.(ExplicitKey); ok {
return string(key), nil
}
meta, err := meta.Accessor(obj)
if err != nil {
return "", fmt.Errorf("object has no meta: %v", err)
}
if len(meta.GetNamespace()) > 0 {
return meta.GetNamespace() + "/" + meta.GetName(), nil
}
return meta.GetName(), nil
}
// SplitMetaNamespaceKey returns the namespace and name that
// MetaNamespaceKeyFunc encoded into key.
//
// TODO: replace key-as-string with a key-as-struct so that this
// packing/unpacking won't be necessary.
func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
parts := strings.Split(key, "/")
switch len(parts) {
case 1:
// name only, no namespace
return "", parts[0], nil
case 2:
// namespace and name
return parts[0], parts[1], nil
}
return "", "", fmt.Errorf("unexpected key format: %q", key)
}
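// A round-trip sketch (pod is an assumed object named "mypod" in namespace
// "default"): the two functions invert each other for namespaced objects.
//
//	key, _ := MetaNamespaceKeyFunc(pod)       // "default/mypod"
//	ns, name, _ := SplitMetaNamespaceKey(key) // ns == "default", name == "mypod"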
// cache responsibilities are limited to:
// 1. Computing keys for objects via keyFunc
// 2. Invoking methods of a ThreadSafeStore interface
type cache struct {
// cacheStorage bears the burden of thread safety for the cache
cacheStorage ThreadSafeStore
// keyFunc is used to make the key for objects stored in and retrieved from items, and
// should be deterministic.
keyFunc KeyFunc
}
var _ Store = &cache{}
// Add inserts an item into the cache.
func (c *cache) Add(obj interface{}) error {
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Add(key, obj)
return nil
}
// Update sets an item in the cache to its updated state.
func (c *cache) Update(obj interface{}) error {
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Update(key, obj)
return nil
}
// Delete removes an item from the cache.
func (c *cache) Delete(obj interface{}) error {
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.cacheStorage.Delete(key)
return nil
}
// List returns a list of all the items.
// List is completely threadsafe as long as you treat all items as immutable.
func (c *cache) List() []interface{} {
return c.cacheStorage.List()
}
// ListKeys returns a list of all the keys of the objects currently
// in the cache.
func (c *cache) ListKeys() []string {
return c.cacheStorage.ListKeys()
}
// GetIndexers returns the indexers of the cache.
func (c *cache) GetIndexers() Indexers {
return c.cacheStorage.GetIndexers()
}
// Index returns a list of items that match on the index function
// Index is thread-safe so long as you treat all items as immutable
func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) {
return c.cacheStorage.Index(indexName, obj)
}
func (c *cache) IndexKeys(indexName, indexKey string) ([]string, error) {
return c.cacheStorage.IndexKeys(indexName, indexKey)
}
// ListIndexFuncValues returns the list of generated values of an Index func
func (c *cache) ListIndexFuncValues(indexName string) []string {
return c.cacheStorage.ListIndexFuncValues(indexName)
}
func (c *cache) ByIndex(indexName, indexKey string) ([]interface{}, error) {
return c.cacheStorage.ByIndex(indexName, indexKey)
}
func (c *cache) AddIndexers(newIndexers Indexers) error {
return c.cacheStorage.AddIndexers(newIndexers)
}
// Get returns the requested item, or sets exists=false.
// Get is completely threadsafe as long as you treat all items as immutable.
func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
key, err := c.keyFunc(obj)
if err != nil {
return nil, false, KeyError{obj, err}
}
return c.GetByKey(key)
}
// GetByKey returns the requested item, or sets exists=false.
// GetByKey is completely threadsafe as long as you treat all items as immutable.
func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
item, exists = c.cacheStorage.Get(key)
return item, exists, nil
}
// Replace will delete the contents of 'c', using instead the given list.
// 'c' takes ownership of the list; you should not reference the list again
// after calling this function.
func (c *cache) Replace(list []interface{}, resourceVersion string) error {
items := make(map[string]interface{}, len(list))
for _, item := range list {
key, err := c.keyFunc(item)
if err != nil {
return KeyError{item, err}
}
items[key] = item
}
c.cacheStorage.Replace(items, resourceVersion)
return nil
}
// Resync touches all items in the store to force processing
func (c *cache) Resync() error {
return c.cacheStorage.Resync()
}
// NewStore returns a Store implemented simply with a map and a lock.
func NewStore(keyFunc KeyFunc) Store {
return &cache{
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
keyFunc: keyFunc,
}
}
// NewIndexer returns an Indexer implemented simply with a map and a lock.
func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer {
return &cache{
cacheStorage: NewThreadSafeStore(indexers, Indices{}),
keyFunc: keyFunc,
}
}
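// A hedged indexer sketch: NamespaceIndex and MetaNamespaceIndexFunc are
// assumed to come from this package's index helpers; pod is an assumed object.
//
//	indexer := NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
//	_ = indexer.Add(pod)
//	objs, err := indexer.ByIndex(NamespaceIndex, "default")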

View File

@ -1,156 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"testing"
"k8s.io/apimachinery/pkg/util/sets"
)
// Test public interface
func doTestStore(t *testing.T, store Store) {
mkObj := func(id string, val string) testStoreObject {
return testStoreObject{id: id, val: val}
}
store.Add(mkObj("foo", "bar"))
if item, ok, _ := store.Get(mkObj("foo", "")); !ok {
t.Errorf("didn't find inserted item")
} else {
if e, a := "bar", item.(testStoreObject).val; e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
store.Update(mkObj("foo", "baz"))
if item, ok, _ := store.Get(mkObj("foo", "")); !ok {
t.Errorf("didn't find inserted item")
} else {
if e, a := "baz", item.(testStoreObject).val; e != a {
t.Errorf("expected %v, got %v", e, a)
}
}
store.Delete(mkObj("foo", ""))
if _, ok, _ := store.Get(mkObj("foo", "")); ok {
t.Errorf("found deleted item??")
}
// Test List.
store.Add(mkObj("a", "b"))
store.Add(mkObj("c", "d"))
store.Add(mkObj("e", "e"))
{
found := sets.String{}
for _, item := range store.List() {
found.Insert(item.(testStoreObject).val)
}
if !found.HasAll("b", "d", "e") {
t.Errorf("missing items, found: %v", found)
}
if len(found) != 3 {
t.Errorf("extra items")
}
}
// Test Replace.
store.Replace([]interface{}{
mkObj("foo", "foo"),
mkObj("bar", "bar"),
}, "0")
{
found := sets.String{}
for _, item := range store.List() {
found.Insert(item.(testStoreObject).val)
}
if !found.HasAll("foo", "bar") {
t.Errorf("missing items")
}
if len(found) != 2 {
t.Errorf("extra items")
}
}
}
// Test public interface
func doTestIndex(t *testing.T, indexer Indexer) {
mkObj := func(id string, val string) testStoreObject {
return testStoreObject{id: id, val: val}
}
// Test Index
expected := map[string]sets.String{}
expected["b"] = sets.NewString("a", "c")
expected["f"] = sets.NewString("e")
expected["h"] = sets.NewString("g")
indexer.Add(mkObj("a", "b"))
indexer.Add(mkObj("c", "b"))
indexer.Add(mkObj("e", "f"))
indexer.Add(mkObj("g", "h"))
{
for k, v := range expected {
found := sets.String{}
indexResults, err := indexer.Index("by_val", mkObj("", k))
if err != nil {
t.Errorf("Unexpected error %v", err)
}
for _, item := range indexResults {
found.Insert(item.(testStoreObject).id)
}
items := v.List()
if !found.HasAll(items...) {
t.Errorf("missing items, index %s, expected %v but found %v", k, items, found.List())
}
}
}
}
func testStoreKeyFunc(obj interface{}) (string, error) {
return obj.(testStoreObject).id, nil
}
func testStoreIndexFunc(obj interface{}) ([]string, error) {
return []string{obj.(testStoreObject).val}, nil
}
func testStoreIndexers() Indexers {
indexers := Indexers{}
indexers["by_val"] = testStoreIndexFunc
return indexers
}
type testStoreObject struct {
id string
val string
}
func TestCache(t *testing.T) {
doTestStore(t, NewStore(testStoreKeyFunc))
}
func TestFIFOCache(t *testing.T) {
doTestStore(t, NewFIFO(testStoreKeyFunc))
}
func TestUndeltaStore(t *testing.T) {
nop := func([]interface{}) {}
doTestStore(t, NewUndeltaStore(nop, testStoreKeyFunc))
}
func TestIndex(t *testing.T) {
doTestIndex(t, NewIndexer(testStoreKeyFunc, testStoreIndexers()))
}

View File

@ -1,255 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"math/rand"
"strconv"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
func NewFakeControllerSource() *FakeControllerSource {
return &FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}
}
func NewFakePVControllerSource() *FakePVControllerSource {
return &FakePVControllerSource{
FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}}
}
func NewFakePVCControllerSource() *FakePVCControllerSource {
return &FakePVCControllerSource{
FakeControllerSource{
Items: map[nnu]runtime.Object{},
Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
}}
}
// FakeControllerSource implements listing/watching for testing.
type FakeControllerSource struct {
lock sync.RWMutex
Items map[nnu]runtime.Object
changes []watch.Event // one change per resourceVersion
Broadcaster *watch.Broadcaster
}
type FakePVControllerSource struct {
FakeControllerSource
}
type FakePVCControllerSource struct {
FakeControllerSource
}
// namespace, name, uid to be used as a key.
type nnu struct {
namespace, name string
uid types.UID
}
// Add adds an object to the set and sends an add event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Add(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
}
// Modify updates an object in the set and sends a modified event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Modify(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
}
// Delete deletes an object from the set and sends a delete event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
}
// AddDropWatch adds an object to the set but forgets to send an add event to
// watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
}
// ModifyDropWatch updates an object in the set but forgets to send a modify
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
}
// DeleteDropWatch deletes an object from the set but forgets to send a delete
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
}
func (f *FakeControllerSource) key(accessor metav1.Object) nnu {
return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
}
// Change records the given event (setting the object's resource version) and
// sends a watch event with the specified probability.
func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
f.lock.Lock()
defer f.lock.Unlock()
accessor, err := meta.Accessor(e.Object)
if err != nil {
panic(err) // this is test code only
}
resourceVersion := len(f.changes) + 1
accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
f.changes = append(f.changes, e)
key := f.key(accessor)
switch e.Type {
case watch.Added, watch.Modified:
f.Items[key] = e.Object
case watch.Deleted:
delete(f.Items, key)
}
if rand.Float64() < watchProbability {
f.Broadcaster.Action(e.Type, e.Object)
}
}
func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
list := make([]runtime.Object, 0, len(f.Items))
for _, obj := range f.Items {
// Must make a copy to allow clients to modify the object.
// Otherwise, if they make a change and write it back, they
// will inadvertently change our canonical copy (in
// addition to racing with other clients).
list = append(list, obj.DeepCopyObject())
}
return list, nil
}
// List returns a list object, with its resource version set.
func (f *FakeControllerSource) List(options metav1.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &v1.List{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
listAccessor, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
return listObj, nil
}
// List returns a list object, with its resource version set.
func (f *FakePVControllerSource) List(options metav1.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.FakeControllerSource.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &v1.PersistentVolumeList{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
listAccessor, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
return listObj, nil
}
// List returns a list object, with its resource version set.
func (f *FakePVCControllerSource) List(options metav1.ListOptions) (runtime.Object, error) {
f.lock.RLock()
defer f.lock.RUnlock()
list, err := f.FakeControllerSource.getListItemsLocked()
if err != nil {
return nil, err
}
listObj := &v1.PersistentVolumeClaimList{}
if err := meta.SetList(listObj, list); err != nil {
return nil, err
}
listAccessor, err := meta.ListAccessor(listObj)
if err != nil {
return nil, err
}
resourceVersion := len(f.changes)
listAccessor.SetResourceVersion(strconv.Itoa(resourceVersion))
return listObj, nil
}
// Watch returns a watch, which will be pre-populated with all changes
// after resourceVersion.
func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interface, error) {
f.lock.RLock()
defer f.lock.RUnlock()
rc, err := strconv.Atoi(options.ResourceVersion)
if err != nil {
return nil, err
}
if rc < len(f.changes) {
changes := []watch.Event{}
for _, c := range f.changes[rc:] {
// Must make a copy to allow clients to modify the
// object. Otherwise, if they make a change and write
// it back, they will inadvertently change our
// canonical copy (in addition to racing with other
// clients).
changes = append(changes, watch.Event{Type: c.Type, Object: c.Object.DeepCopyObject()})
}
return f.Broadcaster.WatchWithPrefix(changes), nil
} else if rc > len(f.changes) {
return nil, errors.New("resource version in the future not supported by this fake")
}
return f.Broadcaster.Watch(), nil
}
// Shutdown closes the underlying broadcaster, waiting for events to be
// delivered. It's an error to call any method after calling shutdown. This is
// enforced by Shutdown() leaving f locked.
func (f *FakeControllerSource) Shutdown() {
f.lock.Lock() // Purposely no unlock.
f.Broadcaster.Shutdown()
}
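// A hedged test sketch: because FakeControllerSource implements List and Watch,
// it can stand in for an apiserver as the ListerWatcher of an informer under
// test (somePod is illustrative; cache is k8s.io/client-go/tools/cache).
//
//	source := NewFakeControllerSource()
//	source.Add(somePod)
//	informer := cache.NewSharedInformer(source, somePod, 0)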

View File

@ -1,95 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"sync"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
)
// ensure the watch delivers the requested and only the requested items.
func consume(t *testing.T, w watch.Interface, rvs []string, done *sync.WaitGroup) {
defer done.Done()
for _, rv := range rvs {
got, ok := <-w.ResultChan()
if !ok {
t.Errorf("%#v: unexpected channel close, wanted %v", rvs, rv)
return
}
gotRV := got.Object.(*v1.Pod).ObjectMeta.ResourceVersion
if e, a := rv, gotRV; e != a {
t.Errorf("wanted %v, got %v", e, a)
} else {
t.Logf("Got %v as expected", gotRV)
}
}
// We should not get anything else.
got, open := <-w.ResultChan()
if open {
t.Errorf("%#v: unwanted object %#v", rvs, got)
}
}
func TestRCNumber(t *testing.T) {
pod := func(name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
}
wg := &sync.WaitGroup{}
wg.Add(3)
source := NewFakeControllerSource()
source.Add(pod("foo"))
source.Modify(pod("foo"))
source.Modify(pod("foo"))
w, err := source.Watch(metav1.ListOptions{ResourceVersion: "1"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
go consume(t, w, []string{"2", "3"}, wg)
list, err := source.List(metav1.ListOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if e, a := "3", list.(*v1.List).ResourceVersion; e != a {
t.Errorf("wanted %v, got %v", e, a)
}
w2, err := source.Watch(metav1.ListOptions{ResourceVersion: "2"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
go consume(t, w2, []string{"3"}, wg)
w3, err := source.Watch(metav1.ListOptions{ResourceVersion: "3"})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
go consume(t, w3, []string{}, wg)
source.Shutdown()
wg.Wait()
}

View File

@ -1,304 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
)
// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
// TL;DR caveats: you must not modify anything returned by Get or List as it will break
// the indexing feature in addition to not being thread safe.
//
// The guarantees of thread safety provided by List/Get are only valid if the caller
// treats returned items as read-only. For example, a pointer inserted in the store
// through `Add` will be returned as is by `Get`. Multiple clients might invoke `Get`
// on the same key and modify the pointer in a non-thread-safe way. Also note that
// modifying objects stored by the indexers (if any) will *not* automatically lead
// to a re-index. So it's not a good idea to directly modify the objects returned by
// Get/List, in general.
type ThreadSafeStore interface {
Add(key string, obj interface{})
Update(key string, obj interface{})
Delete(key string)
Get(key string) (item interface{}, exists bool)
List() []interface{}
ListKeys() []string
Replace(map[string]interface{}, string)
Index(indexName string, obj interface{}) ([]interface{}, error)
IndexKeys(indexName, indexKey string) ([]string, error)
ListIndexFuncValues(name string) []string
ByIndex(indexName, indexKey string) ([]interface{}, error)
GetIndexers() Indexers
// AddIndexers adds more indexers to this store. If you call this after you already have data
// in the store, the results are undefined.
AddIndexers(newIndexers Indexers) error
Resync() error
}
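// A minimal sketch (hedged; pod is an assumed object): unlike Store, the caller
// supplies keys explicitly, and everything returned must be treated as read-only.
//
//	s := NewThreadSafeStore(Indexers{}, Indices{})
//	s.Add("default/mypod", pod)
//	obj, exists := s.Get("default/mypod")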
// threadSafeMap implements ThreadSafeStore
type threadSafeMap struct {
lock sync.RWMutex
items map[string]interface{}
// indexers maps a name to an IndexFunc
indexers Indexers
// indices maps a name to an Index
indices Indices
}
func (c *threadSafeMap) Add(key string, obj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
oldObject := c.items[key]
c.items[key] = obj
c.updateIndices(oldObject, obj, key)
}
func (c *threadSafeMap) Update(key string, obj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
oldObject := c.items[key]
c.items[key] = obj
c.updateIndices(oldObject, obj, key)
}
func (c *threadSafeMap) Delete(key string) {
c.lock.Lock()
defer c.lock.Unlock()
if obj, exists := c.items[key]; exists {
c.deleteFromIndices(obj, key)
delete(c.items, key)
}
}
func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) {
c.lock.RLock()
defer c.lock.RUnlock()
item, exists = c.items[key]
return item, exists
}
func (c *threadSafeMap) List() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
list := make([]interface{}, 0, len(c.items))
for _, item := range c.items {
list = append(list, item)
}
return list
}
// ListKeys returns a list of all the keys of the objects currently
// in the threadSafeMap.
func (c *threadSafeMap) ListKeys() []string {
c.lock.RLock()
defer c.lock.RUnlock()
list := make([]string, 0, len(c.items))
for key := range c.items {
list = append(list, key)
}
return list
}
func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) {
c.lock.Lock()
defer c.lock.Unlock()
c.items = items
// rebuild any index
c.indices = Indices{}
for key, item := range c.items {
c.updateIndices(nil, item, key)
}
}
// Index returns a list of items that match on the index function
// Index is thread-safe so long as you treat all items as immutable
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
c.lock.RLock()
defer c.lock.RUnlock()
indexFunc := c.indexers[indexName]
if indexFunc == nil {
return nil, fmt.Errorf("Index with name %s does not exist", indexName)
}
indexKeys, err := indexFunc(obj)
if err != nil {
return nil, err
}
index := c.indices[indexName]
// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
returnKeySet := sets.String{}
for _, indexKey := range indexKeys {
set := index[indexKey]
for _, key := range set.UnsortedList() {
returnKeySet.Insert(key)
}
}
list := make([]interface{}, 0, returnKeySet.Len())
for absoluteKey := range returnKeySet {
list = append(list, c.items[absoluteKey])
}
return list, nil
}
// ByIndex returns a list of items that match an exact value on the index function
func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
c.lock.RLock()
defer c.lock.RUnlock()
indexFunc := c.indexers[indexName]
if indexFunc == nil {
return nil, fmt.Errorf("Index with name %s does not exist", indexName)
}
index := c.indices[indexName]
set := index[indexKey]
list := make([]interface{}, 0, set.Len())
for _, key := range set.List() {
list = append(list, c.items[key])
}
return list, nil
}
// IndexKeys returns a list of keys that match on the index function.
// IndexKeys is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
c.lock.RLock()
defer c.lock.RUnlock()
indexFunc := c.indexers[indexName]
if indexFunc == nil {
return nil, fmt.Errorf("Index with name %s does not exist", indexName)
}
index := c.indices[indexName]
set := index[indexKey]
return set.List(), nil
}
func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
c.lock.RLock()
defer c.lock.RUnlock()
index := c.indices[indexName]
names := make([]string, 0, len(index))
for key := range index {
names = append(names, key)
}
return names
}
func (c *threadSafeMap) GetIndexers() Indexers {
return c.indexers
}
func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
c.lock.Lock()
defer c.lock.Unlock()
if len(c.items) > 0 {
return fmt.Errorf("cannot add indexers to running index")
}
oldKeys := sets.StringKeySet(c.indexers)
newKeys := sets.StringKeySet(newIndexers)
if oldKeys.HasAny(newKeys.List()...) {
return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
}
for k, v := range newIndexers {
c.indexers[k] = v
}
return nil
}
// updateIndices modifies the object's location in the managed indexes; if this is an update, you must provide an oldObj
// updateIndices must be called from a function that already has a lock on the cache
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
// if we got an old object, we need to remove it before we add it again
if oldObj != nil {
c.deleteFromIndices(oldObj, key)
}
for name, indexFunc := range c.indexers {
indexValues, err := indexFunc(newObj)
if err != nil {
panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
}
index := c.indices[name]
if index == nil {
index = Index{}
c.indices[name] = index
}
for _, indexValue := range indexValues {
set := index[indexValue]
if set == nil {
set = sets.String{}
index[indexValue] = set
}
set.Insert(key)
}
}
}
// deleteFromIndices removes the object from each of the managed indexes;
// it is intended to be called from a function that already has a lock on the cache.
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
for name, indexFunc := range c.indexers {
indexValues, err := indexFunc(obj)
if err != nil {
panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
}
index := c.indices[name]
if index == nil {
continue
}
for _, indexValue := range indexValues {
set := index[indexValue]
if set != nil {
set.Delete(key)
}
}
}
}
func (c *threadSafeMap) Resync() error {
// Nothing to do
return nil
}
func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
return &threadSafeMap{
items: map[string]interface{}{},
indexers: indexers,
indices: indices,
}
}

View File

@ -1,83 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
// UndeltaStore listens to incremental updates and sends complete state on every change.
// It implements the Store interface so that it can receive a stream of mirrored objects
// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change
// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results
// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
// PushFunc should be thread safe.
type UndeltaStore struct {
Store
PushFunc func([]interface{})
}
// Assert that it implements the Store interface.
var _ Store = &UndeltaStore{}
// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
// time thread 1 thread 2
// 0 UndeltaStore.Add(a)
// 1 UndeltaStore.Add(b)
// 2 Store.Add(a)
// 3 Store.Add(b)
// 4 Store.List() -> [a,b]
// 5 Store.List() -> [a,b]
func (u *UndeltaStore) Add(obj interface{}) error {
if err := u.Store.Add(obj); err != nil {
return err
}
u.PushFunc(u.Store.List())
return nil
}
func (u *UndeltaStore) Update(obj interface{}) error {
if err := u.Store.Update(obj); err != nil {
return err
}
u.PushFunc(u.Store.List())
return nil
}
func (u *UndeltaStore) Delete(obj interface{}) error {
if err := u.Store.Delete(obj); err != nil {
return err
}
u.PushFunc(u.Store.List())
return nil
}
func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
if err := u.Store.Replace(list, resourceVersion); err != nil {
return err
}
u.PushFunc(u.Store.List())
return nil
}
// NewUndeltaStore returns an UndeltaStore implemented with a Store.
func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
return &UndeltaStore{
Store: NewStore(keyFunc),
PushFunc: pushFunc,
}
}
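// A hedged usage sketch: every mutation calls push with the complete, current
// contents of the store (pod is an assumed object).
//
//	push := func(state []interface{}) { /* consume the full snapshot */ }
//	u := NewUndeltaStore(push, MetaNamespaceKeyFunc)
//	_ = u.Add(pod)    // push is called with [pod]
//	_ = u.Delete(pod) // push is called with []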

View File

@ -1,131 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"reflect"
"testing"
)
// store_test.go checks that UndeltaStore conforms to the Store interface
// behavior. This test additionally verifies that the push func is called.
type testUndeltaObject struct {
name string
val interface{}
}
func testUndeltaKeyFunc(obj interface{}) (string, error) {
return obj.(testUndeltaObject).name, nil
}
func TestUpdateCallsPush(t *testing.T) {
mkObj := func(name string, val interface{}) testUndeltaObject {
return testUndeltaObject{name: name, val: val}
}
var got []interface{}
callcount := 0
push := func(m []interface{}) {
callcount++
got = m
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
u.Add(mkObj("a", 2))
u.Update(mkObj("a", 1))
if callcount != 2 {
t.Errorf("Expected 2 calls, got %d", callcount)
}
l := []interface{}{mkObj("a", 1)}
if !reflect.DeepEqual(l, got) {
t.Errorf("Expected %#v, Got %#v", l, got)
}
}
func TestDeleteCallsPush(t *testing.T) {
mkObj := func(name string, val interface{}) testUndeltaObject {
return testUndeltaObject{name: name, val: val}
}
var got []interface{}
callcount := 0
push := func(m []interface{}) {
callcount++
got = m
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
u.Add(mkObj("a", 2))
u.Delete(mkObj("a", ""))
if callcount != 2 {
t.Errorf("Expected 2 calls, got %d", callcount)
}
expected := []interface{}{}
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected %#v, Got %#v", expected, got)
}
}
func TestReadsDoNotCallPush(t *testing.T) {
push := func(m []interface{}) {
t.Errorf("Unexpected call to push!")
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
// These should not call push.
_ = u.List()
_, _, _ = u.Get(testUndeltaObject{"a", ""})
}
func TestReplaceCallsPush(t *testing.T) {
mkObj := func(name string, val interface{}) testUndeltaObject {
return testUndeltaObject{name: name, val: val}
}
var got []interface{}
callcount := 0
push := func(m []interface{}) {
callcount++
got = m
}
u := NewUndeltaStore(push, testUndeltaKeyFunc)
m := []interface{}{mkObj("a", 1)}
u.Replace(m, "0")
if callcount != 1 {
t.Errorf("Expected 1 calls, got %d", callcount)
}
expected := []interface{}{mkObj("a", 1)}
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected %#v, Got %#v", expected, got)
}
}


@@ -1,302 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
"sigs.k8s.io/yaml"
)
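// newMergedConfig writes the given cert, key, and CA material to the supplied
// file paths and returns a Config that references that material both inline
// (red-user and cow-cluster use the *Data fields) and by file path (blue-user
// and chicken-cluster), so the minify/flatten/shorten helpers can be exercised
// against both forms.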
func newMergedConfig(certFile, certContent, keyFile, keyContent, caFile, caContent string, t *testing.T) Config {
if err := ioutil.WriteFile(certFile, []byte(certContent), 0644); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := ioutil.WriteFile(keyFile, []byte(keyContent), 0600); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := ioutil.WriteFile(caFile, []byte(caContent), 0644); err != nil {
t.Errorf("unexpected error: %v", err)
}
return Config{
AuthInfos: map[string]*AuthInfo{
"red-user": {Token: "red-token", ClientCertificateData: []byte(certContent), ClientKeyData: []byte(keyContent)},
"blue-user": {Token: "blue-token", ClientCertificate: certFile, ClientKey: keyFile}},
Clusters: map[string]*Cluster{
"cow-cluster": {Server: "http://cow.org:8080", CertificateAuthorityData: []byte(caContent)},
"chicken-cluster": {Server: "http://chicken.org:8080", CertificateAuthority: caFile}},
Contexts: map[string]*Context{
"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"},
"shaker-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster"}},
CurrentContext: "federal-context",
}
}
func TestMinifySuccess(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
if err := MinifyConfig(&mutatingConfig); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(mutatingConfig.Contexts) > 1 {
t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts)
}
if _, exists := mutatingConfig.Contexts["federal-context"]; !exists {
t.Errorf("missing context")
}
if len(mutatingConfig.Clusters) > 1 {
t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters)
}
if _, exists := mutatingConfig.Clusters["cow-cluster"]; !exists {
t.Errorf("missing cluster")
}
if len(mutatingConfig.AuthInfos) > 1 {
t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos)
}
if _, exists := mutatingConfig.AuthInfos["red-user"]; !exists {
t.Errorf("missing user")
}
}
func TestMinifyMissingContext(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
mutatingConfig.CurrentContext = "missing"
errMsg := "cannot locate context missing"
if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg {
t.Errorf("expected %v, got %v", errMsg, err)
}
}
func TestMinifyMissingCluster(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
delete(mutatingConfig.Clusters, mutatingConfig.Contexts[mutatingConfig.CurrentContext].Cluster)
errMsg := "cannot locate cluster cow-cluster"
if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg {
t.Errorf("expected %v, got %v", errMsg, err)
}
}
func TestMinifyMissingAuthInfo(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
mutatingConfig := newMergedConfig(certFile.Name(), "cert", keyFile.Name(), "key", caFile.Name(), "ca", t)
delete(mutatingConfig.AuthInfos, mutatingConfig.Contexts[mutatingConfig.CurrentContext].AuthInfo)
errMsg := "cannot locate user red-user"
if err := MinifyConfig(&mutatingConfig); err == nil || err.Error() != errMsg {
t.Errorf("expected %v, got %v", errMsg, err)
}
}
func TestFlattenSuccess(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
certData := "cert"
keyData := "key"
caData := "ca"
unchangingCluster := "cow-cluster"
unchangingAuthInfo := "red-user"
changingCluster := "chicken-cluster"
changingAuthInfo := "blue-user"
startingConfig := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, t)
mutatingConfig := startingConfig
if err := FlattenConfig(&mutatingConfig); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(mutatingConfig.Contexts) != 2 {
t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts)
}
if !reflect.DeepEqual(startingConfig.Contexts, mutatingConfig.Contexts) {
t.Errorf("expected %v, got %v", startingConfig.Contexts, mutatingConfig.Contexts)
}
if len(mutatingConfig.Clusters) != 2 {
t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters)
}
if !reflect.DeepEqual(startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) {
t.Errorf("expected %v, got %v", startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster])
}
if len(mutatingConfig.Clusters[changingCluster].CertificateAuthority) != 0 {
t.Errorf("unexpected caFile")
}
if string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData) != caData {
t.Errorf("expected %v, got %v", caData, string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData))
}
if len(mutatingConfig.AuthInfos) != 2 {
t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos)
}
if !reflect.DeepEqual(startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) {
t.Errorf("expected %v, got %v", startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo])
}
if len(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificate) != 0 {
t.Errorf("unexpected caFile")
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData) != certData {
t.Errorf("expected %v, got %v", certData, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData))
}
if len(mutatingConfig.AuthInfos[changingAuthInfo].ClientKey) != 0 {
t.Errorf("unexpected caFile")
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData) != keyData {
t.Errorf("expected %v, got %v", keyData, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData))
}
}
func Example_minifyAndShorten() {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
certData := "cert"
keyData := "key"
caData := "ca"
config := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, nil)
MinifyConfig(&config)
ShortenConfig(&config)
output, _ := yaml.Marshal(config)
fmt.Printf("%s", string(output))
// Output:
// clusters:
// cow-cluster:
// LocationOfOrigin: ""
// certificate-authority-data: DATA+OMITTED
// server: http://cow.org:8080
// contexts:
// federal-context:
// LocationOfOrigin: ""
// cluster: cow-cluster
// user: red-user
// current-context: federal-context
// preferences: {}
// users:
// red-user:
// LocationOfOrigin: ""
// client-certificate-data: REDACTED
// client-key-data: REDACTED
// token: red-token
}
func TestShortenSuccess(t *testing.T) {
certFile, _ := ioutil.TempFile("", "")
defer os.Remove(certFile.Name())
keyFile, _ := ioutil.TempFile("", "")
defer os.Remove(keyFile.Name())
caFile, _ := ioutil.TempFile("", "")
defer os.Remove(caFile.Name())
certData := "cert"
keyData := "key"
caData := "ca"
unchangingCluster := "chicken-cluster"
unchangingAuthInfo := "blue-user"
changingCluster := "cow-cluster"
changingAuthInfo := "red-user"
startingConfig := newMergedConfig(certFile.Name(), certData, keyFile.Name(), keyData, caFile.Name(), caData, t)
mutatingConfig := startingConfig
ShortenConfig(&mutatingConfig)
if len(mutatingConfig.Contexts) != 2 {
t.Errorf("unexpected contexts: %v", mutatingConfig.Contexts)
}
if !reflect.DeepEqual(startingConfig.Contexts, mutatingConfig.Contexts) {
t.Errorf("expected %v, got %v", startingConfig.Contexts, mutatingConfig.Contexts)
}
redacted := string(redactedBytes)
dataOmitted := string(dataOmittedBytes)
if len(mutatingConfig.Clusters) != 2 {
t.Errorf("unexpected clusters: %v", mutatingConfig.Clusters)
}
if !reflect.DeepEqual(startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster]) {
t.Errorf("expected %v, got %v", startingConfig.Clusters[unchangingCluster], mutatingConfig.Clusters[unchangingCluster])
}
if string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData) != dataOmitted {
t.Errorf("expected %v, got %v", dataOmitted, string(mutatingConfig.Clusters[changingCluster].CertificateAuthorityData))
}
if len(mutatingConfig.AuthInfos) != 2 {
t.Errorf("unexpected users: %v", mutatingConfig.AuthInfos)
}
if !reflect.DeepEqual(startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo]) {
t.Errorf("expected %v, got %v", startingConfig.AuthInfos[unchangingAuthInfo], mutatingConfig.AuthInfos[unchangingAuthInfo])
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData) != redacted {
t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientCertificateData))
}
if string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData) != redacted {
t.Errorf("expected %v, got %v", redacted, string(mutatingConfig.AuthInfos[changingAuthInfo].ClientKeyData))
}
}


@@ -1,135 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"sigs.k8s.io/yaml"
)
func Example_emptyConfig() {
defaultConfig := NewConfig()
output, err := yaml.Marshal(defaultConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// clusters: {}
// contexts: {}
// current-context: ""
// preferences: {}
// users: {}
}
func Example_ofOptionsConfig() {
defaultConfig := NewConfig()
defaultConfig.Preferences.Colors = true
defaultConfig.Clusters["alfa"] = &Cluster{
Server: "https://alfa.org:8080",
InsecureSkipTLSVerify: true,
CertificateAuthority: "path/to/my/cert-ca-filename",
}
defaultConfig.Clusters["bravo"] = &Cluster{
Server: "https://bravo.org:8080",
InsecureSkipTLSVerify: false,
}
defaultConfig.AuthInfos["white-mage-via-cert"] = &AuthInfo{
ClientCertificate: "path/to/my/client-cert-filename",
ClientKey: "path/to/my/client-key-filename",
}
defaultConfig.AuthInfos["red-mage-via-token"] = &AuthInfo{
Token: "my-secret-token",
}
defaultConfig.AuthInfos["black-mage-via-auth-provider"] = &AuthInfo{
AuthProvider: &AuthProviderConfig{
Name: "gcp",
Config: map[string]string{
"foo": "bar",
"token": "s3cr3t-t0k3n",
},
},
}
defaultConfig.Contexts["bravo-as-black-mage"] = &Context{
Cluster: "bravo",
AuthInfo: "black-mage-via-auth-provider",
Namespace: "yankee",
}
defaultConfig.Contexts["alfa-as-black-mage"] = &Context{
Cluster: "alfa",
AuthInfo: "black-mage-via-auth-provider",
Namespace: "zulu",
}
defaultConfig.Contexts["alfa-as-white-mage"] = &Context{
Cluster: "alfa",
AuthInfo: "white-mage-via-cert",
}
defaultConfig.CurrentContext = "alfa-as-white-mage"
output, err := yaml.Marshal(defaultConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// clusters:
// alfa:
// LocationOfOrigin: ""
// certificate-authority: path/to/my/cert-ca-filename
// insecure-skip-tls-verify: true
// server: https://alfa.org:8080
// bravo:
// LocationOfOrigin: ""
// server: https://bravo.org:8080
// contexts:
// alfa-as-black-mage:
// LocationOfOrigin: ""
// cluster: alfa
// namespace: zulu
// user: black-mage-via-auth-provider
// alfa-as-white-mage:
// LocationOfOrigin: ""
// cluster: alfa
// user: white-mage-via-cert
// bravo-as-black-mage:
// LocationOfOrigin: ""
// cluster: bravo
// namespace: yankee
// user: black-mage-via-auth-provider
// current-context: alfa-as-white-mage
// preferences:
// colors: true
// users:
// black-mage-via-auth-provider:
// LocationOfOrigin: ""
// auth-provider:
// config:
// foo: bar
// token: s3cr3t-t0k3n
// name: gcp
// red-mage-via-token:
// LocationOfOrigin: ""
// token: my-secret-token
// white-mage-via-cert:
// LocationOfOrigin: ""
// client-certificate: path/to/my/client-cert-filename
// client-key: path/to/my/client-key-filename
}


@@ -1,695 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"io/ioutil"
"net/http"
"os"
"reflect"
"strings"
"testing"
"github.com/imdario/mergo"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
func TestMergoSemantics(t *testing.T) {
type U struct {
A string
B int64
}
type T struct {
S []string
X string
Y int64
U U
}
var testDataStruct = []struct {
dst T
src T
expected T
}{
{
dst: T{X: "one"},
src: T{X: "two"},
expected: T{X: "two"},
},
{
dst: T{X: "one", Y: 5, U: U{A: "four", B: 6}},
src: T{X: "two", U: U{A: "three", B: 4}},
expected: T{X: "two", Y: 5, U: U{A: "three", B: 4}},
},
{
dst: T{S: []string{"test3", "test4", "test5"}},
src: T{S: []string{"test1", "test2", "test3"}},
expected: T{S: []string{"test1", "test2", "test3"}},
},
}
for _, data := range testDataStruct {
err := mergo.MergeWithOverwrite(&data.dst, &data.src)
if err != nil {
t.Errorf("error while merging: %s", err)
}
if !reflect.DeepEqual(data.dst, data.expected) {
// The mergo library has previously changed in an incompatible way.
// example:
//
// https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a
//
// This test verifies that the semantics of the merge are what we expect.
// If they are not, the mergo library may have been updated and broken
// unexpectedly.
t.Errorf("mergo.MergeWithOverwrite did not provide expected output: %+v doesn't match %+v", data.dst, data.expected)
}
}
var testDataMap = []struct {
dst map[string]int
src map[string]int
expected map[string]int
}{
{
dst: map[string]int{"rsc": 6543, "r": 2138, "gri": 1908, "adg": 912, "prt": 22},
src: map[string]int{"rsc": 3711, "r": 2138, "gri": 1908, "adg": 912},
expected: map[string]int{"rsc": 3711, "r": 2138, "gri": 1908, "adg": 912, "prt": 22},
},
}
for _, data := range testDataMap {
err := mergo.MergeWithOverwrite(&data.dst, &data.src)
if err != nil {
t.Errorf("error while merging: %s", err)
}
if !reflect.DeepEqual(data.dst, data.expected) {
// The mergo library has previously changed in an incompatible way.
// example:
//
// https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a
//
// This test verifies that the semantics of the merge are what we expect.
// If they are not, the mergo library may have been updated and broken
// unexpectedly.
t.Errorf("mergo.MergeWithOverwrite did not provide expected output: %+v doesn't match %+v", data.dst, data.expected)
}
}
}
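// createValidTestConfig returns the smallest config that ClientConfig()
// accepts: one cluster, one token-bearing user, and a current context
// wiring the two together.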
func createValidTestConfig() *clientcmdapi.Config {
const (
server = "https://anything.com:8080"
token = "the-token"
)
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: server,
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: token,
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
return config
}
func createCAValidTestConfig() *clientcmdapi.Config {
config := createValidTestConfig()
config.Clusters["clean"].CertificateAuthorityData = []byte{0, 0}
return config
}
func TestInsecureOverridesCA(t *testing.T) {
config := createCAValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
InsecureSkipTLSVerify: true,
},
}, nil)
actualCfg, err := clientBuilder.ClientConfig()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchBoolArg(true, actualCfg.Insecure, t)
matchStringArg("", actualCfg.TLSClientConfig.CAFile, t)
matchByteArg(nil, actualCfg.TLSClientConfig.CAData, t)
}
func TestMergeContext(t *testing.T) {
const namespace = "overridden-namespace"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
_, overridden, err := clientBuilder.Namespace()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if overridden {
t.Error("Expected namespace to not be overridden")
}
clientBuilder = NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
Context: clientcmdapi.Context{
Namespace: namespace,
},
}, nil)
actual, overridden, err := clientBuilder.Namespace()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !overridden {
t.Error("Expected namespace to be overridden")
}
matchStringArg(namespace, actual, t)
}
func TestModifyContext(t *testing.T) {
expectedCtx := map[string]bool{
"updated": true,
"clean": true,
}
tempPath, err := ioutil.TempFile("", "testclientcmd-")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer os.Remove(tempPath.Name())
pathOptions := NewDefaultPathOptions()
config := createValidTestConfig()
pathOptions.GlobalFile = tempPath.Name()
// define a new context and make it current; ModifyConfig below persists it to our path options config
config.Contexts["updated"] = &clientcmdapi.Context{
Cluster: "updated",
AuthInfo: "updated",
}
config.CurrentContext = "updated"
if err := ModifyConfig(pathOptions, *config, true); err != nil {
t.Errorf("Unexpected error: %v", err)
}
startingConfig, err := pathOptions.GetStartingConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// make sure the current context was updated
matchStringArg("updated", startingConfig.CurrentContext, t)
// there should now be two contexts
if len(startingConfig.Contexts) != len(expectedCtx) {
t.Fatalf("unexpected nuber of contexts, expecting %v, but found %v", len(expectedCtx), len(startingConfig.Contexts))
}
for key := range startingConfig.Contexts {
if !expectedCtx[key] {
t.Fatalf("expected context %q to exist", key)
}
}
}
func TestCertificateData(t *testing.T) {
caData := []byte("ca-data")
certData := []byte("cert-data")
keyData := []byte("key-data")
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
CertificateAuthorityData: caData,
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificateData: certData,
ClientKeyData: keyData,
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure cert data gets into config (will override file paths)
matchByteArg(caData, clientConfig.TLSClientConfig.CAData, t)
matchByteArg(certData, clientConfig.TLSClientConfig.CertData, t)
matchByteArg(keyData, clientConfig.TLSClientConfig.KeyData, t)
}
func TestBasicAuthData(t *testing.T) {
username := "myuser"
password := "mypass"
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Username: username,
Password: password,
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// Make sure basic auth data gets into config
matchStringArg(username, clientConfig.Username, t)
matchStringArg(password, clientConfig.Password, t)
}
func TestBasicTokenFile(t *testing.T) {
token := "exampletoken"
f, err := ioutil.TempFile("", "tokenfile")
if err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
defer os.Remove(f.Name())
if err := ioutil.WriteFile(f.Name(), []byte(token), 0644); err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
TokenFile: f.Name(),
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
var out *http.Request
clientConfig.WrapTransport(fakeTransport(func(req *http.Request) (*http.Response, error) {
out = req
return &http.Response{}, nil
})).RoundTrip(&http.Request{})
matchStringArg(token, strings.TrimPrefix(out.Header.Get("Authorization"), "Bearer "), t)
}
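// fakeTransport adapts a plain function into an http.RoundTripper so a test
// can capture the outgoing request.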
type fakeTransport func(*http.Request) (*http.Response, error)
func (ft fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return ft(req)
}
func TestPrecedenceTokenFile(t *testing.T) {
token := "exampletoken"
f, err := ioutil.TempFile("", "tokenfile")
if err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
defer os.Remove(f.Name())
if err := ioutil.WriteFile(f.Name(), []byte(token), 0644); err != nil {
t.Errorf("Unexpected error: %v", err)
return
}
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "https://localhost:8443",
}
expectedToken := "expected"
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: expectedToken,
TokenFile: f.Name(),
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
config.CurrentContext = "clean"
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(expectedToken, clientConfig.BearerToken, t)
}
func TestCreateClean(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
matchStringArg("", clientConfig.APIPath, t)
matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
func TestCreateCleanWithPrefix(t *testing.T) {
tt := []struct {
server string
host string
}{
{"https://anything.com:8080/foo/bar", "https://anything.com:8080/foo/bar"},
{"http://anything.com:8080/foo/bar", "http://anything.com:8080/foo/bar"},
{"http://anything.com:8080/foo/bar/", "http://anything.com:8080/foo/bar/"},
{"http://anything.com:8080/", "http://anything.com:8080/"},
{"http://anything.com:8080//", "http://anything.com:8080//"},
{"anything.com:8080/foo/bar", "anything.com:8080/foo/bar"},
{"anything.com:8080", "anything.com:8080"},
{"anything.com", "anything.com"},
{"anything", "anything"},
{"", "http://localhost:8080"},
}
for _, tc := range tt {
config := createValidTestConfig()
cleanConfig := config.Clusters["clean"]
cleanConfig.Server = tc.server
config.Clusters["clean"] = cleanConfig
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
}, nil)
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(tc.host, clientConfig.Host, t)
}
}
func TestCreateCleanDefault(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{})
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
func TestCreateCleanDefaultCluster(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{
ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
})
clientConfig, err := clientBuilder.ClientConfig()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
matchStringArg(config.Clusters["clean"].Server, clientConfig.Host, t)
matchBoolArg(config.Clusters["clean"].InsecureSkipTLSVerify, clientConfig.Insecure, t)
matchStringArg(config.AuthInfos["clean"].Token, clientConfig.BearerToken, t)
}
func TestCreateMissingContextNoDefault(t *testing.T) {
const expectedErrorContains = "Context was not found for specified context"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "not-present", &ConfigOverrides{}, nil)
_, err := clientBuilder.ClientConfig()
if err == nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestCreateMissingContext(t *testing.T) {
const expectedErrorContains = "context was not found for specified context: not-present"
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "not-present", &ConfigOverrides{
ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"},
}, nil)
_, err := clientBuilder.ClientConfig()
if err == nil {
t.Fatalf("Expected error: %v", expectedErrorContains)
}
if !strings.Contains(err.Error(), expectedErrorContains) {
t.Fatalf("Expected error: %v, but got %v", expectedErrorContains, err)
}
}
func TestInClusterClientConfigPrecedence(t *testing.T) {
tt := []struct {
overrides *ConfigOverrides
}{
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
},
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: "https://host-from-overrides.com",
CertificateAuthority: "/path/to/ca-from-overrides.crt",
},
AuthInfo: clientcmdapi.AuthInfo{
Token: "https://host-from-overrides.com",
},
},
},
{
overrides: &ConfigOverrides{},
},
}
for _, tc := range tt {
expectedServer := "https://host-from-cluster.com"
expectedToken := "token-from-cluster"
expectedCAFile := "/path/to/ca-from-cluster.crt"
icc := &inClusterClientConfig{
inClusterConfigProvider: func() (*restclient.Config, error) {
return &restclient.Config{
Host: expectedServer,
BearerToken: expectedToken,
TLSClientConfig: restclient.TLSClientConfig{
CAFile: expectedCAFile,
},
}, nil
},
overrides: tc.overrides,
}
clientConfig, err := icc.ClientConfig()
if err != nil {
t.Fatalf("Unxpected error: %v", err)
}
if overriddenServer := tc.overrides.ClusterInfo.Server; len(overriddenServer) > 0 {
expectedServer = overriddenServer
}
if overriddenToken := tc.overrides.AuthInfo.Token; len(overriddenToken) > 0 {
expectedToken = overriddenToken
}
if overriddenCAFile := tc.overrides.ClusterInfo.CertificateAuthority; len(overriddenCAFile) > 0 {
expectedCAFile = overriddenCAFile
}
if clientConfig.Host != expectedServer {
t.Errorf("Expected server %v, got %v", expectedServer, clientConfig.Host)
}
if clientConfig.BearerToken != expectedToken {
t.Errorf("Expected token %v, got %v", expectedToken, clientConfig.BearerToken)
}
if clientConfig.TLSClientConfig.CAFile != expectedCAFile {
t.Errorf("Expected Certificate Authority %v, got %v", expectedCAFile, clientConfig.TLSClientConfig.CAFile)
}
}
}
func matchBoolArg(expected, got bool, t *testing.T) {
if expected != got {
t.Errorf("Expected %v, got %v", expected, got)
}
}
func matchStringArg(expected, got string, t *testing.T) {
if expected != got {
t.Errorf("Expected %q, got %q", expected, got)
}
}
func matchByteArg(expected, got []byte, t *testing.T) {
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected %v, got %v", expected, got)
}
}
func TestNamespaceOverride(t *testing.T) {
config := &DirectClientConfig{
overrides: &ConfigOverrides{
Context: clientcmdapi.Context{
Namespace: "foo",
},
},
}
ns, overridden, err := config.Namespace()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !overridden {
t.Errorf("Expected overridden = true")
}
matchStringArg("foo", ns, t)
}
func TestAuthConfigMerge(t *testing.T) {
content := `
apiVersion: v1
clusters:
- cluster:
server: https://localhost:8080
name: foo-cluster
contexts:
- context:
cluster: foo-cluster
user: foo-user
namespace: bar
name: foo-context
current-context: foo-context
kind: Config
users:
- name: foo-user
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- arg-1
- arg-2
command: foo-command
`
tmpfile, err := ioutil.TempFile("", "kubeconfig")
if err != nil {
t.Error(err)
}
defer os.Remove(tmpfile.Name())
if err := ioutil.WriteFile(tmpfile.Name(), []byte(content), 0666); err != nil {
t.Error(err)
}
config, err := BuildConfigFromFlags("", tmpfile.Name())
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(config.ExecProvider.Args, []string{"arg-1", "arg-2"}) {
t.Errorf("Got args %v when they should be %v\n", config.ExecProvider.Args, []string{"arg-1", "arg-2"})
}
}


@@ -1,789 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"testing"
"sigs.k8s.io/yaml"
"k8s.io/apimachinery/pkg/runtime"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
)
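// Fixture configs used throughout the loading and merging tests below.
// Alfa through Delta are pairwise disjoint; ConflictAlfa deliberately
// reuses the red-user and cow-cluster names from Alfa (with different
// contents) to exercise merge precedence.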
var (
testConfigAlfa = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "red-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://cow.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster", Namespace: "hammer-ns"}},
}
testConfigBravo = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"black-user": {Token: "black-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"pig-cluster": {Server: "http://pig.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"queen-anne-context": {AuthInfo: "black-user", Cluster: "pig-cluster", Namespace: "saw-ns"}},
}
testConfigCharlie = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"green-user": {Token: "green-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"horse-cluster": {Server: "http://horse.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"shaker-context": {AuthInfo: "green-user", Cluster: "horse-cluster", Namespace: "chisel-ns"}},
}
testConfigDelta = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"blue-user": {Token: "blue-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"chicken-cluster": {Server: "http://chicken.org:8080"}},
Contexts: map[string]*clientcmdapi.Context{
"gothic-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster", Namespace: "plane-ns"}},
}
testConfigConflictAlfa = clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "a-different-red-token"},
"yellow-user": {Token: "yellow-token"}},
Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://a-different-cow.org:8080", InsecureSkipTLSVerify: true},
"donkey-cluster": {Server: "http://donkey.org:8080", InsecureSkipTLSVerify: true}},
CurrentContext: "federal-context",
}
)
func TestNonExistentCommandLineFile(t *testing.T) {
loadingRules := ClientConfigLoadingRules{
ExplicitPath: "bogus_file",
}
_, err := loadingRules.Load()
if err == nil {
t.Fatalf("Expected error for missing command-line file, got none")
}
if !strings.Contains(err.Error(), "bogus_file") {
t.Fatalf("Expected error about 'bogus_file', got %s", err.Error())
}
}
func TestToleratingMissingFiles(t *testing.T) {
loadingRules := ClientConfigLoadingRules{
Precedence: []string{"bogus1", "bogus2", "bogus3"},
}
_, err := loadingRules.Load()
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
}
func TestErrorReadingFile(t *testing.T) {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
if err := ioutil.WriteFile(commandLineFile.Name(), []byte("bogus value"), 0644); err != nil {
t.Fatalf("Error creating tempfile: %v", err)
}
loadingRules := ClientConfigLoadingRules{
ExplicitPath: commandLineFile.Name(),
}
_, err := loadingRules.Load()
if err == nil {
t.Fatalf("Expected error for unloadable file, got none")
}
if !strings.Contains(err.Error(), commandLineFile.Name()) {
t.Fatalf("Expected error about '%s', got %s", commandLineFile.Name(), err.Error())
}
}
func TestErrorReadingNonFile(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Couldn't create tmpdir")
}
defer os.RemoveAll(tmpdir)
loadingRules := ClientConfigLoadingRules{
ExplicitPath: tmpdir,
}
_, err = loadingRules.Load()
if err == nil {
t.Fatalf("Expected error for non-file, got none")
}
if !strings.Contains(err.Error(), tmpdir) {
t.Fatalf("Expected error about '%s', got %s", tmpdir, err.Error())
}
}
func TestConflictingCurrentContext(t *testing.T) {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
mockCommandLineConfig := clientcmdapi.Config{
CurrentContext: "any-context-value",
}
mockEnvVarConfig := clientcmdapi.Config{
CurrentContext: "a-different-context",
}
WriteToFile(mockCommandLineConfig, commandLineFile.Name())
WriteToFile(mockEnvVarConfig, envVarFile.Name())
loadingRules := ClientConfigLoadingRules{
ExplicitPath: commandLineFile.Name(),
Precedence: []string{envVarFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if mergedConfig.CurrentContext != mockCommandLineConfig.CurrentContext {
t.Errorf("expected %v, got %v", mockCommandLineConfig.CurrentContext, mergedConfig.CurrentContext)
}
}
func TestLoadingEmptyMaps(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
mockConfig := clientcmdapi.Config{
CurrentContext: "any-context-value",
}
WriteToFile(mockConfig, configFile.Name())
config, err := LoadFromFile(configFile.Name())
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if config.Clusters == nil {
t.Error("expected config.Clusters to be non-nil")
}
if config.AuthInfos == nil {
t.Error("expected config.AuthInfos to be non-nil")
}
if config.Contexts == nil {
t.Error("expected config.Contexts to be non-nil")
}
}
func TestDuplicateClusterName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
- cluster:
api-version: v2
server: https://test.example.server:443
certificate-authority: /var/run/secrets/test.example.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"kubeconfig-cluster\" in list") {
t.Error("Expected error in loading duplicate cluster name, got none")
}
}
func TestDuplicateContextName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
- context:
cluster: test-example-cluster
namespace: test-example
user: test-example-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"kubeconfig-context\" in list") {
t.Error("Expected error in loading duplicate context name, got none")
}
}
func TestDuplicateUserName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/test.example.com/serviceaccount/token
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"kubeconfig-user\" in list") {
t.Error("Expected error in loading duplicate user name, got none")
}
}
func TestDuplicateExtensionName(t *testing.T) {
configFile, _ := ioutil.TempFile("", "")
defer os.Remove(configFile.Name())
err := ioutil.WriteFile(configFile.Name(), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
api-version: v1
server: https://kubernetes.default.svc:443
certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
name: kubeconfig-cluster
contexts:
- context:
cluster: kubeconfig-cluster
namespace: default
user: kubeconfig-user
name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
user:
tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
extensions:
- extension:
bytes: test
name: test-extension
- extension:
bytes: some-example
name: test-extension
`), os.FileMode(0755))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
_, err = LoadFromFile(configFile.Name())
if err == nil || !strings.Contains(err.Error(),
"error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"test-extension\" in list") {
t.Error("Expected error in loading duplicate extension name, got none")
}
}
func TestResolveRelativePaths(t *testing.T) {
pathResolutionConfig1 := clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"},
"absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"},
"relative-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "../relative/client/cmd"}},
"absolute-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "/absolute/client/cmd"}},
"PATH-cmd-1": {Exec: &clientcmdapi.ExecConfig{Command: "cmd"}},
},
Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-1": {CertificateAuthority: "../relative/ca"},
"absolute-server-1": {CertificateAuthority: "/absolute/ca"},
},
}
pathResolutionConfig2 := clientcmdapi.Config{
AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-2": {ClientCertificate: "relative/client/cert2", ClientKey: "../relative/client/key2"},
"absolute-user-2": {ClientCertificate: "/absolute/client/cert2", ClientKey: "/absolute/client/key2"},
},
Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-2": {CertificateAuthority: "../relative/ca2"},
"absolute-server-2": {CertificateAuthority: "/absolute/ca2"},
},
}
configDir1, _ := ioutil.TempDir("", "")
defer os.RemoveAll(configDir1)
configFile1 := path.Join(configDir1, ".kubeconfig")
configDir1, _ = filepath.Abs(configDir1)
configDir2, _ := ioutil.TempDir("", "")
defer os.RemoveAll(configDir2)
configDir2, _ = ioutil.TempDir(configDir2, "")
configFile2 := path.Join(configDir2, ".kubeconfig")
configDir2, _ = filepath.Abs(configDir2)
WriteToFile(pathResolutionConfig1, configFile1)
WriteToFile(pathResolutionConfig2, configFile2)
loadingRules := ClientConfigLoadingRules{
Precedence: []string{configFile1, configFile2},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
foundClusterCount := 0
for key, cluster := range mergedConfig.Clusters {
if key == "relative-server-1" {
foundClusterCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.Clusters["relative-server-1"].CertificateAuthority), cluster.CertificateAuthority, t)
}
if key == "relative-server-2" {
foundClusterCount++
matchStringArg(path.Join(configDir2, pathResolutionConfig2.Clusters["relative-server-2"].CertificateAuthority), cluster.CertificateAuthority, t)
}
if key == "absolute-server-1" {
foundClusterCount++
matchStringArg(pathResolutionConfig1.Clusters["absolute-server-1"].CertificateAuthority, cluster.CertificateAuthority, t)
}
if key == "absolute-server-2" {
foundClusterCount++
matchStringArg(pathResolutionConfig2.Clusters["absolute-server-2"].CertificateAuthority, cluster.CertificateAuthority, t)
}
}
if foundClusterCount != 4 {
t.Errorf("Expected 4 clusters, found %v: %v", foundClusterCount, mergedConfig.Clusters)
}
foundAuthInfoCount := 0
for key, authInfo := range mergedConfig.AuthInfos {
if key == "relative-user-1" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos["relative-user-1"].ClientCertificate), authInfo.ClientCertificate, t)
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos["relative-user-1"].ClientKey), authInfo.ClientKey, t)
}
if key == "relative-user-2" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir2, pathResolutionConfig2.AuthInfos["relative-user-2"].ClientCertificate), authInfo.ClientCertificate, t)
matchStringArg(path.Join(configDir2, pathResolutionConfig2.AuthInfos["relative-user-2"].ClientKey), authInfo.ClientKey, t)
}
if key == "absolute-user-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos["absolute-user-1"].ClientCertificate, authInfo.ClientCertificate, t)
matchStringArg(pathResolutionConfig1.AuthInfos["absolute-user-1"].ClientKey, authInfo.ClientKey, t)
}
if key == "absolute-user-2" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientCertificate, authInfo.ClientCertificate, t)
matchStringArg(pathResolutionConfig2.AuthInfos["absolute-user-2"].ClientKey, authInfo.ClientKey, t)
}
if key == "relative-cmd-1" {
foundAuthInfoCount++
matchStringArg(path.Join(configDir1, pathResolutionConfig1.AuthInfos[key].Exec.Command), authInfo.Exec.Command, t)
}
if key == "absolute-cmd-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos[key].Exec.Command, authInfo.Exec.Command, t)
}
if key == "PATH-cmd-1" {
foundAuthInfoCount++
matchStringArg(pathResolutionConfig1.AuthInfos[key].Exec.Command, authInfo.Exec.Command, t)
}
}
if foundAuthInfoCount != 7 {
t.Errorf("Expected 7 users, found %v: %v", foundAuthInfoCount, mergedConfig.AuthInfos)
}
}
func TestMigratingFile(t *testing.T) {
sourceFile, _ := ioutil.TempFile("", "")
defer os.Remove(sourceFile.Name())
destinationFile, _ := ioutil.TempFile("", "")
// delete the file so that we'll write to it
os.Remove(destinationFile.Name())
WriteToFile(testConfigAlfa, sourceFile.Name())
loadingRules := ClientConfigLoadingRules{
MigrationRules: map[string]string{destinationFile.Name(): sourceFile.Name()},
}
if _, err := loadingRules.Load(); err != nil {
t.Errorf("unexpected error %v", err)
}
// the load should have recreated this file
defer os.Remove(destinationFile.Name())
sourceContent, err := ioutil.ReadFile(sourceFile.Name())
if err != nil {
t.Errorf("unexpected error %v", err)
}
destinationContent, err := ioutil.ReadFile(destinationFile.Name())
if err != nil {
t.Errorf("unexpected error %v", err)
}
if !reflect.DeepEqual(sourceContent, destinationContent) {
t.Errorf("source and destination do not match")
}
}
func TestMigratingFileLeaveExistingFileAlone(t *testing.T) {
sourceFile, _ := ioutil.TempFile("", "")
defer os.Remove(sourceFile.Name())
destinationFile, _ := ioutil.TempFile("", "")
defer os.Remove(destinationFile.Name())
WriteToFile(testConfigAlfa, sourceFile.Name())
loadingRules := ClientConfigLoadingRules{
MigrationRules: map[string]string{destinationFile.Name(): sourceFile.Name()},
}
if _, err := loadingRules.Load(); err != nil {
t.Errorf("unexpected error %v", err)
}
destinationContent, err := ioutil.ReadFile(destinationFile.Name())
if err != nil {
t.Errorf("unexpected error %v", err)
}
if len(destinationContent) > 0 {
t.Errorf("destination should not have been touched")
}
}
func TestMigratingFileSourceMissingSkip(t *testing.T) {
sourceFilename := "some-missing-file"
destinationFile, _ := ioutil.TempFile("", "")
// delete the file so that we'll write to it
os.Remove(destinationFile.Name())
loadingRules := ClientConfigLoadingRules{
MigrationRules: map[string]string{destinationFile.Name(): sourceFilename},
}
if _, err := loadingRules.Load(); err != nil {
t.Errorf("unexpected error %v", err)
}
if _, err := os.Stat(destinationFile.Name()); !os.IsNotExist(err) {
t.Errorf("destination should not exist")
}
}
func TestFileLocking(t *testing.T) {
f, _ := ioutil.TempFile("", "")
defer os.Remove(f.Name())
err := lockFile(f.Name())
if err != nil {
t.Errorf("unexpected error while locking file: %v", err)
}
defer unlockFile(f.Name())
err = lockFile(f.Name())
if err == nil {
t.Error("expected error while locking file.")
}
}
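// The Example functions below encode the merged config and print it as
// YAML, so their // Output blocks double as documentation of kubeconfig
// merge precedence.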
func Example_noMergingOnExplicitPaths() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
WriteToFile(testConfigAlfa, commandLineFile.Name())
WriteToFile(testConfigConflictAlfa, envVarFile.Name())
loadingRules := ClientConfigLoadingRules{
ExplicitPath: commandLineFile.Name(),
Precedence: []string{envVarFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
output, err := yaml.JSONToYAML(json)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// apiVersion: v1
// clusters:
// - cluster:
// server: http://cow.org:8080
// name: cow-cluster
// contexts:
// - context:
// cluster: cow-cluster
// namespace: hammer-ns
// user: red-user
// name: federal-context
// current-context: ""
// kind: Config
// preferences: {}
// users:
// - name: red-user
// user:
// token: red-token
}
func Example_mergingSomeWithConflict() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
WriteToFile(testConfigAlfa, commandLineFile.Name())
WriteToFile(testConfigConflictAlfa, envVarFile.Name())
loadingRules := ClientConfigLoadingRules{
Precedence: []string{commandLineFile.Name(), envVarFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
output, err := yaml.JSONToYAML(json)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// apiVersion: v1
// clusters:
// - cluster:
// server: http://cow.org:8080
// name: cow-cluster
// - cluster:
// insecure-skip-tls-verify: true
// server: http://donkey.org:8080
// name: donkey-cluster
// contexts:
// - context:
// cluster: cow-cluster
// namespace: hammer-ns
// user: red-user
// name: federal-context
// current-context: federal-context
// kind: Config
// preferences: {}
// users:
// - name: red-user
// user:
// token: red-token
// - name: yellow-user
// user:
// token: yellow-token
}
func Example_mergingEverythingNoConflicts() {
commandLineFile, _ := ioutil.TempFile("", "")
defer os.Remove(commandLineFile.Name())
envVarFile, _ := ioutil.TempFile("", "")
defer os.Remove(envVarFile.Name())
currentDirFile, _ := ioutil.TempFile("", "")
defer os.Remove(currentDirFile.Name())
homeDirFile, _ := ioutil.TempFile("", "")
defer os.Remove(homeDirFile.Name())
WriteToFile(testConfigAlfa, commandLineFile.Name())
WriteToFile(testConfigBravo, envVarFile.Name())
WriteToFile(testConfigCharlie, currentDirFile.Name())
WriteToFile(testConfigDelta, homeDirFile.Name())
loadingRules := ClientConfigLoadingRules{
Precedence: []string{commandLineFile.Name(), envVarFile.Name(), currentDirFile.Name(), homeDirFile.Name()},
}
mergedConfig, err := loadingRules.Load()
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
json, err := runtime.Encode(clientcmdlatest.Codec, mergedConfig)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
output, err := yaml.JSONToYAML(json)
if err != nil {
fmt.Printf("Unexpected error: %v", err)
}
fmt.Printf("%v", string(output))
// Output:
// apiVersion: v1
// clusters:
// - cluster:
// server: http://chicken.org:8080
// name: chicken-cluster
// - cluster:
// server: http://cow.org:8080
// name: cow-cluster
// - cluster:
// server: http://horse.org:8080
// name: horse-cluster
// - cluster:
// server: http://pig.org:8080
// name: pig-cluster
// contexts:
// - context:
// cluster: cow-cluster
// namespace: hammer-ns
// user: red-user
// name: federal-context
// - context:
// cluster: chicken-cluster
// namespace: plane-ns
// user: blue-user
// name: gothic-context
// - context:
// cluster: pig-cluster
// namespace: saw-ns
// user: black-user
// name: queen-anne-context
// - context:
// cluster: horse-cluster
// namespace: chisel-ns
// user: green-user
// name: shaker-context
// current-context: ""
// kind: Config
// preferences: {}
// users:
// - name: black-user
// user:
// token: black-token
// - name: blue-user
// user:
// token: blue-token
// - name: green-user
// user:
// token: green-token
// - name: red-user
// user:
// token: red-token
}
func TestDeduplicate(t *testing.T) {
testCases := []struct {
src []string
expect []string
}{
{
src: []string{"a", "b", "c", "d", "e", "f"},
expect: []string{"a", "b", "c", "d", "e", "f"},
},
{
src: []string{"a", "b", "c", "b", "e", "f"},
expect: []string{"a", "b", "c", "e", "f"},
},
{
src: []string{"a", "a", "b", "b", "c", "b"},
expect: []string{"a", "b", "c"},
},
}
for _, testCase := range testCases {
get := deduplicate(testCase.src)
if !reflect.DeepEqual(get, testCase.expect) {
t.Errorf("expect: %v, get: %v", testCase.expect, get)
}
}
}


@@ -1,328 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"fmt"
"testing"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
type testLoader struct {
ClientConfigLoader
called bool
config *clientcmdapi.Config
err error
}
func (l *testLoader) Load() (*clientcmdapi.Config, error) {
l.called = true
return l.config, l.err
}
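// testClientConfig is a stub ClientConfig whose accessors return the canned
// values stored on the struct; RawConfig is intentionally unsupported.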
type testClientConfig struct {
config *restclient.Config
namespace string
namespaceSpecified bool
err error
}
func (c *testClientConfig) RawConfig() (clientcmdapi.Config, error) {
return clientcmdapi.Config{}, fmt.Errorf("unexpected call")
}
func (c *testClientConfig) ClientConfig() (*restclient.Config, error) {
return c.config, c.err
}
func (c *testClientConfig) Namespace() (string, bool, error) {
return c.namespace, c.namespaceSpecified, c.err
}
func (c *testClientConfig) ConfigAccess() ConfigAccess {
return nil
}
type testICC struct {
testClientConfig
possible bool
called bool
}
func (icc *testICC) Possible() bool {
icc.called = true
return icc.possible
}
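// TestInClusterConfig verifies when DeferredLoadingClientConfig consults the
// in-cluster provider: only when the file-based config is empty or
// indistinguishable from the default config.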
func TestInClusterConfig(t *testing.T) {
default1 := &DirectClientConfig{
config: *createValidTestConfig(),
contextName: "clean",
overrides: &ConfigOverrides{},
}
invalidDefaultConfig := clientcmdapi.NewConfig()
invalidDefaultConfig.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "http://localhost:8080",
}
invalidDefaultConfig.Contexts["other"] = &clientcmdapi.Context{
Cluster: "clean",
}
invalidDefaultConfig.CurrentContext = "clean"
defaultInvalid := &DirectClientConfig{
config: *invalidDefaultConfig,
overrides: &ConfigOverrides{},
}
if _, err := defaultInvalid.ClientConfig(); err == nil || !IsConfigurationInvalid(err) {
t.Fatalf("expected an invalid-configuration error, got %v", err)
}
config1, err := default1.ClientConfig()
if err != nil {
t.Fatal(err)
}
config2 := &restclient.Config{Host: "config2"}
err1 := fmt.Errorf("unique error")
testCases := map[string]struct {
clientConfig *testClientConfig
icc *testICC
defaultConfig *DirectClientConfig
checkedICC bool
result *restclient.Config
err error
}{
"in-cluster checked on other error": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{},
checkedICC: true,
result: nil,
err: ErrEmptyConfig,
},
"in-cluster not checked on non-empty error": {
clientConfig: &testClientConfig{err: ErrEmptyCluster},
icc: &testICC{},
checkedICC: false,
result: nil,
err: ErrEmptyCluster,
},
"in-cluster checked when config is default": {
defaultConfig: default1,
clientConfig: &testClientConfig{config: config1},
icc: &testICC{},
checkedICC: true,
result: config1,
err: nil,
},
"in-cluster not checked when default config is invalid": {
defaultConfig: defaultInvalid,
clientConfig: &testClientConfig{config: config1},
icc: &testICC{},
checkedICC: false,
result: config1,
err: nil,
},
"in-cluster not checked when config is not equal to default": {
defaultConfig: default1,
clientConfig: &testClientConfig{config: config2},
icc: &testICC{},
checkedICC: false,
result: config2,
err: nil,
},
"in-cluster checked when config is not equal to default and error is empty": {
clientConfig: &testClientConfig{config: config2, err: ErrEmptyConfig},
icc: &testICC{},
checkedICC: true,
result: config2,
err: ErrEmptyConfig,
},
"in-cluster error returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
err: err1,
},
},
checkedICC: true,
result: nil,
err: err1,
},
"in-cluster config returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
config: config2,
},
},
checkedICC: true,
result: config2,
err: nil,
},
"in-cluster not checked when standard default is invalid": {
defaultConfig: &DefaultClientConfig,
clientConfig: &testClientConfig{config: config2},
icc: &testICC{},
checkedICC: false,
result: config2,
err: nil,
},
}
for name, test := range testCases {
c := &DeferredLoadingClientConfig{icc: test.icc}
c.loader = &ClientConfigLoadingRules{DefaultClientConfig: test.defaultConfig}
c.clientConfig = test.clientConfig
cfg, err := c.ClientConfig()
if test.icc.called != test.checkedICC {
t.Errorf("%s: unexpected in-cluster-config call %t", name, test.icc.called)
}
if err != test.err || cfg != test.result {
t.Errorf("%s: unexpected result: %v %#v", name, err, cfg)
}
}
}
func TestInClusterConfigNamespace(t *testing.T) {
err1 := fmt.Errorf("unique error")
testCases := map[string]struct {
clientConfig *testClientConfig
icc *testICC
checkedICC bool
result string
ok bool
err error
}{
"in-cluster checked on empty error": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{},
checkedICC: true,
err: ErrEmptyConfig,
},
"in-cluster not checked on non-empty error": {
clientConfig: &testClientConfig{err: ErrEmptyCluster},
icc: &testICC{},
err: ErrEmptyCluster,
},
"in-cluster checked when config is default": {
clientConfig: &testClientConfig{},
icc: &testICC{},
checkedICC: true,
},
"in-cluster not checked when config is not equal to default": {
clientConfig: &testClientConfig{namespace: "test", namespaceSpecified: true},
icc: &testICC{},
result: "test",
ok: true,
},
"in-cluster checked when namespace is not specified, but is defaulted": {
clientConfig: &testClientConfig{namespace: "test", namespaceSpecified: false},
icc: &testICC{},
checkedICC: true,
result: "test",
ok: false,
},
"in-cluster error returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
err: err1,
},
},
checkedICC: true,
err: err1,
},
"in-cluster config returned when config is empty": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
namespace: "test",
namespaceSpecified: true,
},
},
checkedICC: true,
result: "test",
ok: true,
},
"in-cluster config returned when config is empty and namespace is defaulted but not explicitly set": {
clientConfig: &testClientConfig{err: ErrEmptyConfig},
icc: &testICC{
possible: true,
testClientConfig: testClientConfig{
namespace: "test",
namespaceSpecified: false,
},
},
checkedICC: true,
result: "test",
ok: false,
},
}
for name, test := range testCases {
c := &DeferredLoadingClientConfig{icc: test.icc}
c.clientConfig = test.clientConfig
ns, ok, err := c.Namespace()
if test.icc.called != test.checkedICC {
t.Errorf("%s: unexpected in-cluster-config call %t", name, test.icc.called)
}
if err != test.err || ns != test.result || ok != test.ok {
t.Errorf("%s: unexpected result: %v %s %t", name, err, ns, ok)
}
}
}

View File

@ -1,50 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"testing"
"github.com/spf13/pflag"
)
func TestNamespacePrefixStrip(t *testing.T) {
testData := map[string]string{
"namespaces/foo": "foo",
"NAMESPACES/foo": "foo",
"NameSpaces/foo": "foo",
"namespace/foo": "foo",
"NAMESPACE/foo": "foo",
"nameSpace/foo": "foo",
"ns/foo": "foo",
"NS/foo": "foo",
"namespaces/": "namespaces/",
"namespace/": "namespace/",
"ns/": "ns/",
}
for before, after := range testData {
overrides := &ConfigOverrides{}
fs := &pflag.FlagSet{}
BindOverrideFlags(overrides, fs, RecommendedConfigOverrideFlags(""))
fs.Parse([]string{"--namespace", before})
if overrides.Context.Namespace != after {
t.Fatalf("Expected %s, got %s", after, overrides.Context.Namespace)
}
}
}

View File

@ -1,574 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clientcmd
import (
"io/ioutil"
"os"
"strings"
"testing"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
func TestConfirmUsableBadInfoButOkConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: "missing",
}
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Username: "anything",
Token: "here",
}
config.Contexts["dirty"] = &clientcmdapi.Context{
Cluster: "missing ca",
AuthInfo: "error",
}
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything",
}
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: "here",
}
config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean",
AuthInfo: "clean",
}
badValidation := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read certificate-authority"},
}
okTest := configValidationTest{
config: config,
}
okTest.testConfirmUsable("clean", t)
badValidation.testConfig(t)
}
func TestConfirmUsableBadInfoConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: "missing",
}
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Username: "anything",
Token: "here",
}
config.Contexts["first"] = &clientcmdapi.Context{
Cluster: "missing ca",
AuthInfo: "error",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read certificate-authority"},
}
test.testConfirmUsable("first", t)
}
func TestConfirmUsableEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"},
}
test.testConfirmUsable("", t)
}
func TestConfirmUsableMissingConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"},
}
test.testConfirmUsable("not-here", t)
}
func TestValidateEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"invalid configuration: no configuration has been provided"},
}
test.testConfig(t)
}
func TestValidateMissingCurrentContextConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"context was not found for specified "},
}
test.testConfig(t)
}
func TestIsContextNotFound(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
err := Validate(*config)
if !IsContextNotFound(err) {
t.Errorf("Expected context not found, but got %v", err)
}
if !IsConfigurationInvalid(err) {
t.Errorf("Expected configuration invalid, but got %v", err)
}
}
func TestIsEmptyConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
err := Validate(*config)
if !IsEmptyConfig(err) {
t.Errorf("Expected context not found, but got %v", err)
}
if !IsConfigurationInvalid(err) {
t.Errorf("Expected configuration invalid, but got %v", err)
}
}
func TestIsConfigurationInvalid(t *testing.T) {
if newErrConfigurationInvalid([]error{}) != nil {
t.Errorf("unexpected error")
}
if newErrConfigurationInvalid([]error{ErrNoContext}) == ErrNoContext {
t.Errorf("unexpected error")
}
if newErrConfigurationInvalid([]error{ErrNoContext, ErrNoContext}) == nil {
t.Errorf("unexpected error")
}
if !IsConfigurationInvalid(newErrConfigurationInvalid([]error{ErrNoContext, ErrNoContext})) {
t.Errorf("unexpected error")
}
}
func TestValidateMissingReferencesConfig(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts["anything"] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"user \"missing\" was not found for context \"anything\"", "cluster \"missing\" was not found for context \"anything\""},
}
test.testContext("anything", t)
test.testConfig(t)
}
func TestValidateEmptyContext(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts["anything"] = &clientcmdapi.Context{}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"user was not specified for context \"anything\"", "cluster was not specified for context \"anything\""},
}
test.testContext("anything", t)
test.testConfig(t)
}
func TestValidateEmptyContextName(t *testing.T) {
config := clientcmdapi.NewConfig()
config.CurrentContext = "anything"
config.Contexts[""] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"empty context name", "is not allowed"},
}
test.testContext("", t)
test.testConfig(t)
}
func TestValidateEmptyClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["empty"] = clientcmdapi.NewCluster()
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"cluster has no server defined"},
}
test.testCluster("empty", t)
test.testConfig(t)
}
func TestValidateClusterInfoErrEmptyCluster(t *testing.T) {
cluster := clientcmdapi.NewCluster()
errs := validateClusterInfo("", *cluster)
if len(errs) != 1 {
t.Fatalf("unexpected errors: %v", errs)
}
if errs[0] != ErrEmptyCluster {
t.Errorf("unexpected error: %v", errs[0])
}
}
func TestValidateMissingCAFileClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: "missing",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read certificate-authority"},
}
test.testCluster("missing ca", t)
test.testConfig(t)
}
func TestValidateCleanClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything",
}
test := configValidationTest{
config: config,
}
test.testCluster("clean", t)
test.testConfig(t)
}
func TestValidateCleanWithCAClusterInfo(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig()
config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything",
CertificateAuthority: tempFile.Name(),
}
test := configValidationTest{
config: config,
}
test.testCluster("clean", t)
test.testConfig(t)
}
func TestValidateEmptyAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{}
test := configValidationTest{
config: config,
}
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
ClientCertificate: "missing",
ClientKey: "missing",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"unable to read client-cert", "unable to read client-key"},
}
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateCertDataOverridesFiles(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificate: tempFile.Name(),
ClientCertificateData: []byte("certdata"),
ClientKey: tempFile.Name(),
ClientKeyData: []byte("keydata"),
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"client-cert-data and client-cert are both specified", "client-key-data and client-key are both specified"},
}
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateCleanCertFilesAuthInfo(t *testing.T) {
tempFile, _ := ioutil.TempFile("", "")
defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificate: tempFile.Name(),
ClientKey: tempFile.Name(),
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateCleanTokenAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: "any-value",
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("clean", t)
test.testConfig(t)
}
func TestValidateMultipleMethodsAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Token: "token",
Username: "username",
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{"more than one authentication method", "token", "basicAuth"},
}
test.testAuthInfo("error", t)
test.testConfig(t)
}
func TestValidateAuthInfoExec(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
Args: []string{"hello", "world"},
Env: []clientcmdapi.ExecEnvVar{
{Name: "foo", Value: "bar"},
},
},
}
test := configValidationTest{
config: config,
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecNoVersion(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"apiVersion must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecNoCommand(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
APIVersion: "clientauthentication.k8s.io/v1alpha1",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"command must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecWithAuthProvider(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
AuthProvider: &clientcmdapi.AuthProviderConfig{
Name: "oidc",
},
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"authProvider cannot be provided in combination with an exec plugin for user",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
func TestValidateAuthInfoExecInvalidEnv(t *testing.T) {
config := clientcmdapi.NewConfig()
config.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Exec: &clientcmdapi.ExecConfig{
Command: "/bin/example",
APIVersion: "clientauthentication.k8s.io/v1alpha1",
Env: []clientcmdapi.ExecEnvVar{
{Name: "foo"}, // No value
},
},
}
test := configValidationTest{
config: config,
expectedErrorSubstring: []string{
"env variable foo value must be specified for user to use exec authentication plugin",
},
}
test.testAuthInfo("user", t)
test.testConfig(t)
}
type configValidationTest struct {
config *clientcmdapi.Config
expectedErrorSubstring []string
}
func (c configValidationTest) testContext(contextName string, t *testing.T) {
errs := validateContext(contextName, *c.config.Contexts[contextName], *c.config)
if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
}
for _, curr := range c.expectedErrorSubstring {
if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
}
}
} else {
if len(errs) != 0 {
t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
}
}
}
func (c configValidationTest) testConfirmUsable(contextName string, t *testing.T) {
err := ConfirmUsable(*c.config, contextName)
if len(c.expectedErrorSubstring) != 0 {
if err == nil {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
} else {
for _, curr := range c.expectedErrorSubstring {
if err != nil && !strings.Contains(err.Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, err)
}
}
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
}
func (c configValidationTest) testConfig(t *testing.T) {
err := Validate(*c.config)
if len(c.expectedErrorSubstring) != 0 {
if err == nil {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
} else {
for _, curr := range c.expectedErrorSubstring {
if err != nil && !strings.Contains(err.Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, err)
}
}
if !IsConfigurationInvalid(err) {
t.Errorf("all errors should be configuration invalid: %v", err)
}
}
} else {
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
}
}
func (c configValidationTest) testCluster(clusterName string, t *testing.T) {
errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName])
if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
}
for _, curr := range c.expectedErrorSubstring {
if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
}
}
} else {
if len(errs) != 0 {
t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
}
}
}
func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) {
errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName])
if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 {
t.Errorf("Expected error containing: %v", c.expectedErrorSubstring)
}
for _, curr := range c.expectedErrorSubstring {
if len(errs) != 0 && !strings.Contains(utilerrors.NewAggregate(errs).Error(), curr) {
t.Errorf("Expected error containing: %v, but got %v", c.expectedErrorSubstring, utilerrors.NewAggregate(errs))
}
}
} else {
if len(errs) != 0 {
t.Errorf("Unexpected error: %v", utilerrors.NewAggregate(errs))
}
}
}

View File

@ -1,13 +0,0 @@
approvers:
- mikedanese
- timothysc
reviewers:
- wojtek-t
- deads2k
- mikedanese
- gmarek
- eparis
- timothysc
- ingvagabund
- resouer
- goltermann

View File

@ -1,69 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"net/http"
"sync"
"time"
)
// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object.
// It helps deal with the /healthz endpoint being set up prior to the LeaderElection.
// This contains the code needed to act as an adaptor between the leader
// election code and the health check code. It allows us to provide health
// status about the leader election, most specifically whether the leader
// has failed to renew the lease without exiting the process. In that case we
// should report not healthy and rely on the kubelet to take down the process.
type HealthzAdaptor struct {
pointerLock sync.Mutex
le *LeaderElector
timeout time.Duration
}
// Name returns the name of the health check we are implementing.
func (l *HealthzAdaptor) Name() string {
return "leaderElection"
}
// Check is called by the healthz endpoint handler.
// It fails (returns an error) if we own the lease but have not been able to renew it.
func (l *HealthzAdaptor) Check(req *http.Request) error {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
if l.le == nil {
return nil
}
return l.le.Check(l.timeout)
}
// SetLeaderElection ties a leader election object to a HealthzAdaptor
func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) {
l.pointerLock.Lock()
defer l.pointerLock.Unlock()
l.le = le
}
// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election.
// timeout determines how long past lease expiry the leader is still considered healthy:
// checks within that window after the lease expires will still return healthy.
func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor {
result := &HealthzAdaptor{
timeout: timeout,
}
return result
}
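// serveLeaderElectionHealthz is a usage sketch, not part of the original
// file: it shows one way to wire a HealthzAdaptor into an HTTP mux before
// the election starts. The 20s timeout, the mux, and the function name are
// illustrative assumptions; RunOrDie calls SetLeaderElection on the adaptor
// once the LeaderElector exists, because the adaptor is set as the WatchDog.
func serveLeaderElectionHealthz(mux *http.ServeMux, lec *LeaderElectionConfig) *HealthzAdaptor {
	adaptor := NewLeaderHealthzAdaptor(20 * time.Second)
	lec.WatchDog = adaptor
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		// Check returns an error once we hold the lease but have failed to renew it.
		if err := adaptor.Check(r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Write([]byte("ok"))
	})
	return adaptor
}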

View File

@ -1,175 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"fmt"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/clock"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"net/http"
)
type fakeLock struct {
identity string
}
// Get is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Get() (ler *rl.LeaderElectionRecord, err error) {
return nil, nil
}
// Create is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Create(ler rl.LeaderElectionRecord) error {
return nil
}
// Update is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Update(ler rl.LeaderElectionRecord) error {
return nil
}
// RecordEvent is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) RecordEvent(string) {}
// Identity is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Identity() string {
return fl.identity
}
// Describe is a dummy to allow us to have a fakeLock for testing.
func (fl *fakeLock) Describe() string {
return "Dummy implementation of lock for testing"
}
// TestLeaderElectionHealthChecker tests that the healthcheck for leader election handles its edge cases.
func TestLeaderElectionHealthChecker(t *testing.T) {
current := time.Now()
req := &http.Request{}
tests := []struct {
description string
expected error
adaptorTimeout time.Duration
elector *LeaderElector
}{
{
description: "call check before leader elector initialized",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: nil,
},
{
description: "call check when the the lease is far expired",
expected: fmt.Errorf("failed election to renew leadership on lease %s", "foo"),
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Hour)),
},
},
{
description: "call check when the the lease is far expired but held by another server",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "otherServer",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Hour)),
},
},
{
description: "call check when the the lease is not expired",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current),
},
},
{
description: "call check when the the lease is expired but inside the timeout",
expected: nil,
adaptorTimeout: time.Second * 20,
elector: &LeaderElector{
config: LeaderElectionConfig{
Lock: &fakeLock{identity: "healthTest"},
LeaseDuration: time.Minute,
Name: "foo",
},
observedRecord: rl.LeaderElectionRecord{
HolderIdentity: "healthTest",
},
observedTime: current,
clock: clock.NewFakeClock(current.Add(time.Minute).Add(time.Second)),
},
},
}
for _, test := range tests {
adaptor := NewLeaderHealthzAdaptor(test.adaptorTimeout)
if adaptor.le != nil {
t.Errorf("[%s] leaderChecker started with a LeaderElector %v", test.description, adaptor.le)
}
if test.elector != nil {
test.elector.config.WatchDog = adaptor
adaptor.SetLeaderElection(test.elector)
if adaptor.le == nil {
t.Errorf("[%s] adaptor failed to set the LeaderElector", test.description)
}
}
err := adaptor.Check(req)
if test.expected == nil {
if err == nil {
continue
}
t.Errorf("[%s] called check, expected no error but received \"%v\"", test.description, err)
} else {
if err == nil {
t.Errorf("[%s] called check and failed to received the expected error \"%v\"", test.description, test.expected)
}
if err.Error() != test.expected.Error() {
t.Errorf("[%s] called check, expected %v, received %v", test.description, test.expected, err)
}
}
}
}

View File

@ -1,336 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
// election state.
//
// This implementation does not guarantee that only one client is acting as a
// leader (a.k.a. fencing). A client observes timestamps captured locally to
// infer the state of the leader election. Thus the implementation is tolerant
// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However, the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
// maximum tolerated ratio of time passed on the fastest node to time passed on
// the slowest node can be approximately achieved with a configuration that sets
// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
// to tolerate some nodes progressing forward in time twice as fast as other nodes,
// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
//
// While not required, some method of clock synchronization between nodes in the
// cluster is highly recommended. It's important to keep in mind when configuring
// this client that the tolerance to skew rate varies inversely to master
// availability.
//
// Larger clusters often have a more lenient SLA for API latency. This should be
// taken into account when configuring the client. The rate of leader transitions
// should be monitored and RetryPeriod and LeaseDuration should be increased
// until the rate is stable and acceptably low. It's important to keep in mind
// when configuring this client that the tolerance to API latency varies inversely
// to master availability.
//
// DISCLAIMER: this is an alpha API. This library will likely change significantly
// or even be removed entirely in subsequent releases. Depend on this API at
// your own risk.
package leaderelection
import (
"context"
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/klog"
)
const (
JitterFactor = 1.2
)
// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig
func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.LeaseDuration <= lec.RenewDeadline {
return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
}
if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
}
if lec.LeaseDuration < 1 {
return nil, fmt.Errorf("leaseDuration must be greater than zero")
}
if lec.RenewDeadline < 1 {
return nil, fmt.Errorf("renewDeadline must be greater than zero")
}
if lec.RetryPeriod < 1 {
return nil, fmt.Errorf("retryPeriod must be greater than zero")
}
if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
return &LeaderElector{
config: lec,
clock: clock.RealClock{},
}, nil
}
type LeaderElectionConfig struct {
// Lock is the resource that will be used for locking
Lock rl.Interface
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack.
LeaseDuration time.Duration
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up.
RenewDeadline time.Duration
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
RetryPeriod time.Duration
// Callbacks are callbacks that are triggered during certain lifecycle
// events of the LeaderElector
Callbacks LeaderCallbacks
// WatchDog is the associated health checker
// WatchDog may be nil if it's not needed/configured.
WatchDog *HealthzAdaptor
// Name is the name of the resource lock for debugging
Name string
}
// LeaderCallbacks are callbacks that are triggered during certain
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
// * OnChallenge()
type LeaderCallbacks struct {
// OnStartedLeading is called when a LeaderElector client starts leading
OnStartedLeading func(context.Context)
// OnStoppedLeading is called when a LeaderElector client stops leading
OnStoppedLeading func()
// OnNewLeader is called when the client observes a leader that is
// not the previously observed leader. This includes the first observed
// leader when the client starts.
OnNewLeader func(identity string)
}
// LeaderElector is a leader election client.
type LeaderElector struct {
config LeaderElectionConfig
// internal bookkeeping
observedRecord rl.LeaderElectionRecord
observedTime time.Time
// used to implement OnNewLeader(), may lag slightly from the
// value observedRecord.HolderIdentity if the transition has
// not yet been reported.
reportedLeader string
// clock is a wrapper around time to allow for less flaky testing
clock clock.Clock
// name is the name of the resource lock for debugging
name string
}
// Run starts the leader election loop
func (le *LeaderElector) Run(ctx context.Context) {
defer func() {
runtime.HandleCrash()
le.config.Callbacks.OnStoppedLeading()
}()
if !le.acquire(ctx) {
return // ctx signalled done
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go le.config.Callbacks.OnStartedLeading(ctx)
le.renew(ctx)
}
// RunOrDie starts a client with the provided config or panics if the config
// fails to validate.
func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
le, err := NewLeaderElector(lec)
if err != nil {
panic(err)
}
if lec.WatchDog != nil {
lec.WatchDog.SetLeaderElection(le)
}
le.Run(ctx)
}
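// runOrDieExample is a hedged usage sketch, not part of the original file.
// The durations satisfy NewLeaderElector's validation (LeaseDuration >
// RenewDeadline > JitterFactor*RetryPeriod) and mirror the package doc's
// example of tolerating nodes whose clocks run up to twice as fast as
// others. The lock argument is assumed to come from the resourcelock
// package's New constructor; "example" is a hypothetical lock name.
func runOrDieExample(ctx context.Context, lock rl.Interface) {
	RunOrDie(ctx, LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 60 * time.Second,
		RenewDeadline: 30 * time.Second,
		RetryPeriod:   10 * time.Second,
		Name:          "example",
		Callbacks: LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// leader-only work goes here; stop when ctx is cancelled
			},
			OnStoppedLeading: func() {
				// leadership was lost; callers typically exit the process
			},
		},
	})
}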
// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed.
func (le *LeaderElector) GetLeader() string {
return le.observedRecord.HolderIdentity
}
// IsLeader returns true if the last observed leader was this client else returns false.
func (le *LeaderElector) IsLeader() bool {
return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
}
// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds.
// Returns false if ctx signals done.
func (le *LeaderElector) acquire(ctx context.Context) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
klog.Infof("attempting to acquire leader lease %v...", desc)
wait.JitterUntil(func() {
succeeded = le.tryAcquireOrRenew()
le.maybeReportTransition()
if !succeeded {
klog.V(4).Infof("failed to acquire lease %v", desc)
return
}
le.config.Lock.RecordEvent("became leader")
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
return succeeded
}
// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
func (le *LeaderElector) renew(ctx context.Context) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wait.Until(func() {
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
defer timeoutCancel()
err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
done := make(chan bool, 1)
go func() {
defer close(done)
done <- le.tryAcquireOrRenew()
}()
select {
case <-timeoutCtx.Done():
return false, fmt.Errorf("failed to tryAcquireOrRenew %s", timeoutCtx.Err())
case result := <-done:
return result, nil
}
}, timeoutCtx.Done())
le.maybeReportTransition()
desc := le.config.Lock.Describe()
if err == nil {
klog.V(5).Infof("successfully renewed lease %v", desc)
return
}
le.config.Lock.RecordEvent("stopped leading")
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew() bool {
now := metav1.Now()
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
RenewTime: now,
AcquireTime: now,
}
// 1. obtain or create the ElectionRecord
oldLeaderElectionRecord, err := le.config.Lock.Get()
if err != nil {
if !errors.IsNotFound(err) {
klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
return false
}
if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
klog.Errorf("error initially creating leader election record: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}
// 2. Record obtained, check the Identity & Time
if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
le.observedRecord = *oldLeaderElectionRecord
le.observedTime = le.clock.Now()
}
if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
}
// 3. We're going to try to update. The leaderElectionRecord is set to its default
// here. Let's correct it before updating.
if le.IsLeader() {
leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
} else {
leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
}
// update the lock itself
if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
klog.Errorf("Failed to update lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}
func (le *LeaderElector) maybeReportTransition() {
if le.observedRecord.HolderIdentity == le.reportedLeader {
return
}
le.reportedLeader = le.observedRecord.HolderIdentity
if le.config.Callbacks.OnNewLeader != nil {
go le.config.Callbacks.OnNewLeader(le.reportedLeader)
}
}
// Check will determine if the current lease is expired by more than timeout.
func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
if !le.IsLeader() {
// Currently not concerned with the case where we are a hot standby
return nil
}
// If the lease has been expired for longer than the tolerated timeout, the renew
// has failed and it is time to start reporting ourselves as unhealthy. We should
// have died by now, but conditions like deadlock can prevent this. (See #70819)
if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease {
return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name)
}
return nil
}

View File

@ -1,294 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"fmt"
"sync"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake"
core "k8s.io/client-go/testing"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
)
func createLockObject(objectType string, objectMeta metav1.ObjectMeta) (obj runtime.Object) {
switch objectType {
case "endpoints":
obj = &v1.Endpoints{ObjectMeta: objectMeta}
case "configmaps":
obj = &v1.ConfigMap{ObjectMeta: objectMeta}
default:
panic("unexpected objType:" + objectType)
}
return
}
// Will test leader election using endpoints as the resource
func TestTryAcquireOrRenewEndpoints(t *testing.T) {
testTryAcquireOrRenew(t, "endpoints")
}
func testTryAcquireOrRenew(t *testing.T, objectType string) {
future := time.Now().Add(1000 * time.Hour)
past := time.Now().Add(-1000 * time.Hour)
tests := []struct {
observedRecord rl.LeaderElectionRecord
observedTime time.Time
reactors []struct {
verb string
reaction core.ReactionFunc
}
expectSuccess bool
transitionLeader bool
outHolder string
}{
// acquire from no object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, errors.NewNotFound(action.(core.GetAction).GetResource().GroupResource(), action.(core.GetAction).GetName())
},
},
{
verb: "create",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
expectSuccess: true,
outHolder: "baz",
},
// acquire from unled object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
}
return true, createLockObject(objectType, objectMeta), nil
},
},
{
verb: "update",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
expectSuccess: true,
transitionLeader: true,
outHolder: "baz",
},
// acquire from led, unacked object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
Annotations: map[string]string{
rl.LeaderElectionRecordAnnotationKey: `{"holderIdentity":"bing"}`,
},
}
return true, createLockObject(objectType, objectMeta), nil
},
},
{
verb: "update",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"},
observedTime: past,
expectSuccess: true,
transitionLeader: true,
outHolder: "baz",
},
// don't acquire from led, acked object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
Annotations: map[string]string{
rl.LeaderElectionRecordAnnotationKey: `{"holderIdentity":"bing"}`,
},
}
return true, createLockObject(objectType, objectMeta), nil
},
},
},
observedTime: future,
expectSuccess: false,
outHolder: "bing",
},
// renew already acquired object
{
reactors: []struct {
verb string
reaction core.ReactionFunc
}{
{
verb: "get",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
objectMeta := metav1.ObjectMeta{
Namespace: action.GetNamespace(),
Name: action.(core.GetAction).GetName(),
Annotations: map[string]string{
rl.LeaderElectionRecordAnnotationKey: `{"holderIdentity":"baz"}`,
},
}
return true, createLockObject(objectType, objectMeta), nil
},
},
{
verb: "update",
reaction: func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
},
},
},
observedTime: future,
observedRecord: rl.LeaderElectionRecord{HolderIdentity: "baz"},
expectSuccess: true,
outHolder: "baz",
},
}
for i, test := range tests {
// OnNewLeader is called async so we have to wait for it.
var wg sync.WaitGroup
wg.Add(1)
var reportedLeader string
var lock rl.Interface
objectMeta := metav1.ObjectMeta{Namespace: "foo", Name: "bar"}
resourceLockConfig := rl.ResourceLockConfig{
Identity: "baz",
EventRecorder: &record.FakeRecorder{},
}
c := &fakecorev1.FakeCoreV1{Fake: &core.Fake{}}
for _, reactor := range test.reactors {
c.AddReactor(reactor.verb, objectType, reactor.reaction)
}
c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
t.Errorf("[%v] unreachable action. testclient called too many times: %+v", i, action)
return true, nil, fmt.Errorf("unreachable action")
})
switch objectType {
case "endpoints":
lock = &rl.EndpointsLock{
EndpointsMeta: objectMeta,
LockConfig: resourceLockConfig,
Client: c,
}
case "configmaps":
lock = &rl.ConfigMapLock{
ConfigMapMeta: objectMeta,
LockConfig: resourceLockConfig,
Client: c,
}
}
lec := LeaderElectionConfig{
Lock: lock,
LeaseDuration: 10 * time.Second,
Callbacks: LeaderCallbacks{
OnNewLeader: func(l string) {
defer wg.Done()
reportedLeader = l
},
},
}
le := &LeaderElector{
config: lec,
observedRecord: test.observedRecord,
observedTime: test.observedTime,
clock: clock.RealClock{},
}
if test.expectSuccess != le.tryAcquireOrRenew() {
t.Errorf("[%v]unexpected result of tryAcquireOrRenew: [succeeded=%v]", i, !test.expectSuccess)
}
le.observedRecord.AcquireTime = metav1.Time{}
le.observedRecord.RenewTime = metav1.Time{}
if le.observedRecord.HolderIdentity != test.outHolder {
t.Errorf("[%v]expected holder:\n\t%+v\ngot:\n\t%+v", i, test.outHolder, le.observedRecord.HolderIdentity)
}
if len(test.reactors) != len(c.Actions()) {
t.Errorf("[%v]wrong number of api interactions", i)
}
if test.transitionLeader && le.observedRecord.LeaderTransitions != 1 {
t.Errorf("[%v]leader should have transitioned but did not", i)
}
if !test.transitionLeader && le.observedRecord.LeaderTransitions != 0 {
t.Errorf("[%v]leader should not have transitioned but did", i)
}
le.maybeReportTransition()
wg.Wait()
if reportedLeader != test.outHolder {
t.Errorf("[%v]reported leader was not the new leader. expected %q, got %q", i, test.outHolder, reportedLeader)
}
}
}
// Will test leader election using configmap as the resource
func TestTryAcquireOrRenewConfigMaps(t *testing.T) {
testTryAcquireOrRenew(t, "configmaps")
}

View File

@ -1,109 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"encoding/json"
"errors"
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
// TODO: This is almost an exact replica of the Endpoints lock.
// Going forward, as we self-host more and more components and use
// ConfigMaps as the means to pass configuration data, we will likely
// deprecate the Endpoints lock.
type ConfigMapLock struct {
// ConfigMapMeta should contain a Name and a Namespace of a
// ConfigMap object that the LeaderElector will attempt to lead.
ConfigMapMeta metav1.ObjectMeta
Client corev1client.ConfigMapsGetter
LockConfig ResourceLockConfig
cm *v1.ConfigMap
}
// Get returns the election record from a ConfigMap Annotation
func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) {
var record LeaderElectionRecord
var err error
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if cml.cm.Annotations == nil {
cml.cm.Annotations = make(map[string]string)
}
if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
return nil, err
}
}
return &record, nil
}
// Create attempts to create a LeaderElectionRecord annotation
func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: cml.ConfigMapMeta.Name,
Namespace: cml.ConfigMapMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
})
return err
}
// Update will update an existing annotation on a given resource.
func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error {
if cml.cm == nil {
return errors.New("configmap not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm)
return err
}
// RecordEvent records an event in the leader election, adding the lock's identity as meta-data.
func (cml *ConfigMapLock) RecordEvent(s string) {
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string
func (cml *ConfigMapLock) Describe() string {
return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
}
// Identity returns the Identity of the lock
func (cml *ConfigMapLock) Identity() string {
return cml.LockConfig.Identity
}

View File

@ -1,104 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"encoding/json"
"errors"
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
type EndpointsLock struct {
// EndpointsMeta should contain a Name and a Namespace of an
// Endpoints object that the LeaderElector will attempt to lead.
EndpointsMeta metav1.ObjectMeta
Client corev1client.EndpointsGetter
LockConfig ResourceLockConfig
e *v1.Endpoints
}
// Get returns the election record from an Endpoints Annotation
func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
var record LeaderElectionRecord
var err error
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if el.e.Annotations == nil {
el.e.Annotations = make(map[string]string)
}
if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found {
if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
return nil, err
}
}
return &record, nil
}
// Create attempts to create a LeaderElectionRecord annotation
func (el *EndpointsLock) Create(ler LeaderElectionRecord) error {
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: el.EndpointsMeta.Name,
Namespace: el.EndpointsMeta.Namespace,
Annotations: map[string]string{
LeaderElectionRecordAnnotationKey: string(recordBytes),
},
},
})
return err
}
// Update will update an existing annotation on a given resource.
func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
if el.e == nil {
return errors.New("endpoint not initialized, call get or create first")
}
recordBytes, err := json.Marshal(ler)
if err != nil {
return err
}
el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e)
return err
}
// RecordEvent records an event in the leader election, adding the lock's identity as meta-data.
func (el *EndpointsLock) RecordEvent(s string) {
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string
func (el *EndpointsLock) Describe() string {
return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
}
// Identity returns the Identity of the lock
func (el *EndpointsLock) Identity() string {
return el.LockConfig.Identity
}

View File

@ -1,102 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
)
const (
LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
EndpointsResourceLock = "endpoints"
ConfigMapsResourceLock = "configmaps"
)
// LeaderElectionRecord is the record that is stored in the leader election annotation.
// This information should be used for observational purposes only and could be replaced
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
RenewTime metav1.Time `json:"renewTime"`
LeaderTransitions int `json:"leaderTransitions"`
}
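// For illustration (hypothetical values, not from the original file), a
// record serialized under LeaderElectionRecordAnnotationKey looks roughly
// like:
//
//	{"holderIdentity":"node-1","leaseDurationSeconds":15,
//	 "acquireTime":"2019-01-16T12:00:00Z","renewTime":"2019-01-16T12:00:10Z",
//	 "leaderTransitions":3}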
// ResourceLockConfig common data that exists across different
// resource locks
type ResourceLockConfig struct {
Identity string
EventRecorder record.EventRecorder
}
// Interface offers a common interface for locking on arbitrary
// resources used in leader election. The Interface is used
// to hide the details on specific implementations in order to allow
// them to change over time. This interface is strictly for use
// by the leaderelection code.
type Interface interface {
// Get returns the LeaderElectionRecord
Get() (*LeaderElectionRecord, error)
// Create attempts to create a LeaderElectionRecord
Create(ler LeaderElectionRecord) error
// Update will update an existing LeaderElectionRecord
Update(ler LeaderElectionRecord) error
// RecordEvent is used to record events
RecordEvent(string)
// Identity will return the locks Identity
Identity() string
// Describe is used to convert details on current resource lock
// into a string
Describe() string
}
// New will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) {
switch lockType {
case EndpointsResourceLock:
return &EndpointsLock{
EndpointsMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: client,
LockConfig: rlc,
}, nil
case ConfigMapsResourceLock:
return &ConfigMapLock{
ConfigMapMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: client,
LockConfig: rlc,
}, nil
default:
return nil, fmt.Errorf("Invalid lock-type %s", lockType)
}
}
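// newEndpointsLockExample is a hedged usage sketch, not part of the original
// file: it builds an Endpoints-backed lock via New. The namespace, name, and
// identity are hypothetical; the identity should normally be unique per
// process (e.g. hostname plus a UUID).
func newEndpointsLockExample(client corev1.CoreV1Interface, recorder record.EventRecorder) (Interface, error) {
	return New(EndpointsResourceLock, "kube-system", "example-controller", client, ResourceLockConfig{
		Identity:      "example-holder",
		EventRecorder: recorder,
	})
}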

View File

@ -1,7 +0,0 @@
reviewers:
- wojtek-t
- eparis
- krousey
- jayunit100
- fgrzadkowski
- tmrts

View File

@ -1,117 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pager
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
const defaultPageSize = 500
// ListPageFunc returns a list object for the given list options.
type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)
// SimplePageFunc adapts a context-less list function into one that accepts a context.
func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc {
return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
return fn(opts)
}
}
// ListPager assists client code in breaking large list queries into multiple
// smaller chunks of PageSize or smaller. PageFn is expected to accept a
// metav1.ListOptions that supports paging and return a list. The pager does
// not alter the field or label selectors on the initial options list.
type ListPager struct {
PageSize int64
PageFn ListPageFunc
FullListIfExpired bool
}
// New creates a new pager from the provided pager function using the default
// options. As a last resort, it will fall back to a full list if an expiration
// error is encountered.
func New(fn ListPageFunc) *ListPager {
return &ListPager{
PageSize: defaultPageSize,
PageFn: fn,
FullListIfExpired: true,
}
}
// TODO: introduce other types of paging functions - such as those that retrieve from a list
// of namespaces.
// List returns a single list object, but attempts to retrieve smaller chunks from the
// server to reduce its load. If a chunk attempt fails with a resource-expired error
// and FullListIfExpired is set, it will load the full list instead. The Limit field
// on options, if unset, will default to the page size.
func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if options.Limit == 0 {
options.Limit = p.PageSize
}
var list *metainternalversion.List
for {
obj, err := p.PageFn(ctx, options)
if err != nil {
if !errors.IsResourceExpired(err) || !p.FullListIfExpired {
return nil, err
}
// the list expired while we were processing, fall back to a full list
options.Limit = 0
options.Continue = ""
return p.PageFn(ctx, options)
}
m, err := meta.ListAccessor(obj)
if err != nil {
return nil, fmt.Errorf("returned object must be a list: %v", err)
}
// exit early and return the object we got if we haven't processed any pages
if len(m.GetContinue()) == 0 && list == nil {
return obj, nil
}
// initialize the list and fill its contents
if list == nil {
list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)}
list.ResourceVersion = m.GetResourceVersion()
list.SelfLink = m.GetSelfLink()
}
if err := meta.EachListItem(obj, func(obj runtime.Object) error {
list.Items = append(list.Items, obj)
return nil
}); err != nil {
return nil, err
}
// if we have no more items, return the list
if len(m.GetContinue()) == 0 {
return list, nil
}
// set the next loop up
options.Continue = m.GetContinue()
}
}
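
List above is driven by handing the pager a page function. Here is a minimal sketch under the assumption of an existing clientset; the Pods call merely stands in for any list endpoint that supports chunking.

// Hypothetical usage of pager.New; cs is an assumed kubernetes.Interface
// created elsewhere (e.g. from a kubeconfig).
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

func listAllPods(cs kubernetes.Interface) (runtime.Object, error) {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		// each call fetches one chunk of at most p.PageSize items
		return cs.CoreV1().Pods("default").List(opts)
	}))
	p.PageSize = 250 // override the default of 500 if desired
	// the pages are stitched back into a single aggregated list
	return p.List(context.Background(), metav1.ListOptions{})
}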

View File

@ -1,206 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pager
import (
"context"
"fmt"
"reflect"
"testing"
"k8s.io/apimachinery/pkg/api/errors"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/apimachinery/pkg/runtime"
)
func list(count int, rv string) *metainternalversion.List {
var list metainternalversion.List
for i := 0; i < count; i++ {
list.Items = append(list.Items, &metav1beta1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", i),
},
})
}
list.ResourceVersion = rv
return &list
}
type testPager struct {
t *testing.T
rv string
index int
remaining int
last int
continuing bool
done bool
expectPage int64
}
func (p *testPager) reset() {
p.continuing = false
p.remaining += p.index
p.index = 0
p.last = 0
p.done = false
}
func (p *testPager) PagedList(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if p.done {
p.t.Errorf("did not expect additional call to paged list")
return nil, fmt.Errorf("unexpected list call")
}
expectedContinue := fmt.Sprintf("%s:%d", p.rv, p.last)
if options.Limit != p.expectPage || (p.continuing && options.Continue != expectedContinue) {
p.t.Errorf("invariant violated, expected limit %d and continue %s, got %#v", p.expectPage, expectedContinue, options)
return nil, fmt.Errorf("invariant violated")
}
var list metainternalversion.List
total := options.Limit
if total == 0 {
total = int64(p.remaining)
}
for i := int64(0); i < total; i++ {
if p.remaining <= 0 {
break
}
list.Items = append(list.Items, &metav1beta1.PartialObjectMetadata{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%d", p.index),
},
})
p.remaining--
p.index++
}
p.last = p.index
if p.remaining > 0 {
list.Continue = fmt.Sprintf("%s:%d", p.rv, p.last)
p.continuing = true
} else {
p.done = true
}
list.ResourceVersion = p.rv
return &list, nil
}
func (p *testPager) ExpiresOnSecondPage(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if p.continuing {
p.done = true
return nil, errors.NewResourceExpired("this list has expired")
}
return p.PagedList(ctx, options)
}
func (p *testPager) ExpiresOnSecondPageThenFullList(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
if p.continuing {
p.reset()
p.expectPage = 0
return nil, errors.NewResourceExpired("this list has expired")
}
return p.PagedList(ctx, options)
}
func TestListPager_List(t *testing.T) {
type fields struct {
PageSize int64
PageFn ListPageFunc
FullListIfExpired bool
}
type args struct {
ctx context.Context
options metav1.ListOptions
}
tests := []struct {
name string
fields fields
args args
want runtime.Object
wantErr bool
isExpired bool
}{
{
name: "empty page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 0, rv: "rv:20"}).PagedList},
args: args{},
want: list(0, "rv:20"),
},
{
name: "one page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 9, rv: "rv:20"}).PagedList},
args: args{},
want: list(9, "rv:20"),
},
{
name: "one full page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 10, rv: "rv:20"}).PagedList},
args: args{},
want: list(10, "rv:20"),
},
{
name: "two pages",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 11, rv: "rv:20"}).PagedList},
args: args{},
want: list(11, "rv:20"),
},
{
name: "three pages",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).PagedList},
args: args{},
want: list(21, "rv:20"),
},
{
name: "expires on second page",
fields: fields{PageSize: 10, PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).ExpiresOnSecondPage},
args: args{},
wantErr: true,
isExpired: true,
},
{
name: "expires on second page and then lists",
fields: fields{
FullListIfExpired: true,
PageSize: 10,
PageFn: (&testPager{t: t, expectPage: 10, remaining: 21, rv: "rv:20"}).ExpiresOnSecondPageThenFullList,
},
args: args{},
want: list(21, "rv:20"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := &ListPager{
PageSize: tt.fields.PageSize,
PageFn: tt.fields.PageFn,
FullListIfExpired: tt.fields.FullListIfExpired,
}
got, err := p.List(tt.args.ctx, tt.args.options)
if (err != nil) != tt.wantErr {
t.Errorf("ListPager.List() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.isExpired != errors.IsResourceExpired(err) {
t.Errorf("ListPager.List() error = %v, isExpired %v", err, tt.isExpired)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("ListPager.List() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package portforward adds support for SSH-like port forwarding from the client's
// local host to remote containers.
package portforward // import "k8s.io/client-go/tools/portforward"

View File

@ -1,419 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/runtime"
)
// TODO move to API machinery and re-unify with kubelet/server/portforward
// The subprotocol "portforward.k8s.io" is used for port forwarding.
const PortForwardProtocolV1Name = "portforward.k8s.io"
// PortForwarder knows how to listen for local connections and forward them to
// a remote pod via an upgraded HTTP request.
type PortForwarder struct {
addresses []listenAddress
ports []ForwardedPort
stopChan <-chan struct{}
dialer httpstream.Dialer
streamConn httpstream.Connection
listeners []io.Closer
Ready chan struct{}
requestIDLock sync.Mutex
requestID int
out io.Writer
errOut io.Writer
}
// ForwardedPort contains a Local:Remote port pairing.
type ForwardedPort struct {
Local uint16
Remote uint16
}
/*
valid port specifications:
5000
- forwards from localhost:5000 to pod:5000
8888:5000
- forwards from localhost:8888 to pod:5000
0:5000
:5000
- selects a random available local port,
forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
var forwards []ForwardedPort
for _, portString := range ports {
parts := strings.Split(portString, ":")
var localString, remoteString string
if len(parts) == 1 {
localString = parts[0]
remoteString = parts[0]
} else if len(parts) == 2 {
localString = parts[0]
if localString == "" {
// support :5000
localString = "0"
}
remoteString = parts[1]
} else {
return nil, fmt.Errorf("Invalid port format '%s'", portString)
}
localPort, err := strconv.ParseUint(localString, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err)
}
remotePort, err := strconv.ParseUint(remoteString, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err)
}
if remotePort == 0 {
return nil, fmt.Errorf("Remote port must be > 0")
}
forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
}
return forwards, nil
}
type listenAddress struct {
address string
protocol string
failureMode string
}
func parseAddresses(addressesToParse []string) ([]listenAddress, error) {
var addresses []listenAddress
parsed := make(map[string]listenAddress)
for _, address := range addressesToParse {
if address == "localhost" {
ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"}
parsed[ip.address] = ip
ip = listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"}
parsed[ip.address] = ip
} else if net.ParseIP(address).To4() != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"}
} else if net.ParseIP(address) != nil {
parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"}
} else {
return nil, fmt.Errorf("%s is not a valid IP", address)
}
}
addresses = make([]listenAddress, len(parsed))
id := 0
for _, v := range parsed {
addresses[id] = v
id++
}
return addresses, nil
}
// New creates a new PortForwarder with localhost listen addresses.
func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut)
}
// NewOnAddresses creates a new PortForwarder with custom listen addresses.
func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
if len(addresses) == 0 {
return nil, errors.New("You must specify at least 1 address")
}
parsedAddresses, err := parseAddresses(addresses)
if err != nil {
return nil, err
}
if len(ports) == 0 {
return nil, errors.New("You must specify at least 1 port")
}
parsedPorts, err := parsePorts(ports)
if err != nil {
return nil, err
}
return &PortForwarder{
dialer: dialer,
addresses: parsedAddresses,
ports: parsedPorts,
stopChan: stopChan,
Ready: readyChan,
out: out,
errOut: errOut,
}, nil
}
// ForwardPorts formats and executes a port forwarding request. The connection will remain
// open until stopChan is closed.
func (pf *PortForwarder) ForwardPorts() error {
defer pf.Close()
var err error
pf.streamConn, _, err = pf.dialer.Dial(PortForwardProtocolV1Name)
if err != nil {
return fmt.Errorf("error upgrading connection: %s", err)
}
defer pf.streamConn.Close()
return pf.forward()
}
// forward dials the remote host specified in req, upgrades the request, starts
// listeners for each port specified in ports, and forwards local connections
// to the remote host via streams.
func (pf *PortForwarder) forward() error {
var err error
listenSuccess := false
for _, port := range pf.ports {
err = pf.listenOnPort(&port)
switch {
case err == nil:
listenSuccess = true
default:
if pf.errOut != nil {
fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
}
}
}
if !listenSuccess {
return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports)
}
if pf.Ready != nil {
close(pf.Ready)
}
// wait for interrupt or conn closure
select {
case <-pf.stopChan:
case <-pf.streamConn.CloseChan():
runtime.HandleError(errors.New("lost connection to pod"))
}
return nil
}
// listenOnPort delegates listener creation and waits for connections on requested bind addresses.
// An error is raised based on address groups (default and localhost) and their failure modes
func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
var errors []error
failCounters := make(map[string]int, 2)
successCounters := make(map[string]int, 2)
for _, addr := range pf.addresses {
err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address)
if err != nil {
errors = append(errors, err)
failCounters[addr.failureMode]++
} else {
successCounters[addr.failureMode]++
}
}
if successCounters["all"] == 0 && failCounters["all"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
if failCounters["any"] > 0 {
return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
}
return nil
}
// listenOnPortAndAddress delegates listener creation and waits for new connections
// in the background.
func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
listener, err := pf.getListener(protocol, address, port)
if err != nil {
return err
}
pf.listeners = append(pf.listeners, listener)
go pf.waitForConnection(listener, *port)
return nil
}
// getListener creates a listener on the interface targeted by the given hostname on the given port with
// the given protocol. protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
if err != nil {
return nil, fmt.Errorf("Unable to create listener: Error %s", err)
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)
localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
if err != nil {
// pf.out may be nil; guard it before reporting the parse failure
if pf.out != nil {
fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
}
return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
if pf.out != nil {
fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote)
}
return listener, nil
}
// waitForConnection waits for new connections to listener and handles them in
// the background.
func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
for {
conn, err := listener.Accept()
if err != nil {
// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("Error accepting connection on port %d: %v", port.Local, err))
}
return
}
go pf.handleConnection(conn, port)
}
}
func (pf *PortForwarder) nextRequestID() int {
pf.requestIDLock.Lock()
defer pf.requestIDLock.Unlock()
id := pf.requestID
pf.requestID++
return id
}
// handleConnection copies data between the local connection and the stream to
// the remote server.
func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
defer conn.Close()
if pf.out != nil {
fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
}
requestID := pf.nextRequestID()
// create error stream
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeError)
headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote))
headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID))
errorStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
// we're not writing to this stream
errorStream.Close()
errorChan := make(chan error)
go func() {
message, err := ioutil.ReadAll(errorStream)
switch {
case err != nil:
errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
case len(message) > 0:
errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
}
close(errorChan)
}()
// create data stream
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
localError := make(chan struct{})
remoteDone := make(chan struct{})
go func() {
// Copy from the remote side to the local port.
if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
}
// inform the select below that the remote copy is done
close(remoteDone)
}()
go func() {
// inform server we're not sending any more data after copy unblocks
defer dataStream.Close()
// Copy from the local port to the remote side.
if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
// break out of the select below without waiting for the other copy to finish
close(localError)
}
}()
// wait for either a local->remote error or for copying from remote->local to finish
select {
case <-remoteDone:
case <-localError:
}
// always expect something on errorChan (it may be nil)
err = <-errorChan
if err != nil {
runtime.HandleError(err)
}
}
func (pf *PortForwarder) Close() {
// stop all listeners
for _, l := range pf.listeners {
if err := l.Close(); err != nil {
runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
}
}
}
// GetPorts will return the ports that were forwarded; this can be used to
// retrieve the locally-bound port in cases where the input was port 0. This
// function will return an error if the Ready channel is nil or if the
// listeners are not ready yet; it will succeed after the Ready
// channel has been closed.
func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) {
if pf.Ready == nil {
return nil, fmt.Errorf("no Ready channel provided")
}
select {
case <-pf.Ready:
return pf.ports, nil
default:
return nil, fmt.Errorf("listeners not ready")
}
}
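
For orientation, here is a hedged sketch of how callers such as kubectl wired this forwarder together at the time; the SPDY round tripper setup, the pod URL, and the port pair are assumptions about the caller, not part of this file.

// Hypothetical end-to-end wiring for PortForwarder; cfg, podURL, and the
// "8888:5000" port pair are assumptions supplied by the caller.
package main

import (
	"net/http"
	"net/url"
	"os"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/portforward"
	"k8s.io/client-go/transport/spdy"
)

func forward(cfg *rest.Config, podURL *url.URL, stopCh chan struct{}) error {
	transport, upgrader, err := spdy.RoundTripperFor(cfg)
	if err != nil {
		return err
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", podURL)
	readyCh := make(chan struct{})
	// "8888:5000" listens on localhost:8888 and forwards to pod port 5000;
	// ":5000" would instead pick a random local port, retrievable later
	// via GetPorts once readyCh is closed.
	fw, err := portforward.New(dialer, []string{"8888:5000"}, stopCh, readyCh, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}
	return fw.ForwardPorts() // blocks until stopCh is closed or the connection drops
}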

View File

@ -1,256 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"net"
"os"
"reflect"
"sort"
"strings"
"testing"
"k8s.io/apimachinery/pkg/util/httpstream"
)
type fakeDialer struct {
dialed bool
conn httpstream.Connection
err error
negotiatedProtocol string
}
func (d *fakeDialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
d.dialed = true
return d.conn, d.negotiatedProtocol, d.err
}
func TestParsePortsAndNew(t *testing.T) {
tests := []struct {
input []string
addresses []string
expectedPorts []ForwardedPort
expectedAddresses []listenAddress
expectPortParseError bool
expectAddressParseError bool
expectNewError bool
}{
{input: []string{}, expectNewError: true},
{input: []string{"a"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{":a"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"-1"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"65536"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"0"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"0:0"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"a:5000"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"5000:a"}, expectPortParseError: true, expectAddressParseError: false, expectNewError: true},
{input: []string{"5000:5000"}, addresses: []string{"127.0.0.257"}, expectPortParseError: false, expectAddressParseError: true, expectNewError: true},
{input: []string{"5000:5000"}, addresses: []string{"::g"}, expectPortParseError: false, expectAddressParseError: true, expectNewError: true},
{input: []string{"5000:5000"}, addresses: []string{"domain.invalid"}, expectPortParseError: false, expectAddressParseError: true, expectNewError: true},
{
input: []string{"5000:5000"},
addresses: []string{"localhost"},
expectedPorts: []ForwardedPort{
{5000, 5000},
},
expectedAddresses: []listenAddress{
{protocol: "tcp4", address: "127.0.0.1", failureMode: "all"},
{protocol: "tcp6", address: "::1", failureMode: "all"},
},
},
{
input: []string{"5000:5000"},
addresses: []string{"localhost", "127.0.0.1"},
expectedPorts: []ForwardedPort{
{5000, 5000},
},
expectedAddresses: []listenAddress{
{protocol: "tcp4", address: "127.0.0.1", failureMode: "any"},
{protocol: "tcp6", address: "::1", failureMode: "all"},
},
},
{
input: []string{"5000", "5000:5000", "8888:5000", "5000:8888", ":5000", "0:5000"},
addresses: []string{"127.0.0.1", "::1"},
expectedPorts: []ForwardedPort{
{5000, 5000},
{5000, 5000},
{8888, 5000},
{5000, 8888},
{0, 5000},
{0, 5000},
},
expectedAddresses: []listenAddress{
{protocol: "tcp4", address: "127.0.0.1", failureMode: "any"},
{protocol: "tcp6", address: "::1", failureMode: "any"},
},
},
}
for i, test := range tests {
parsedPorts, err := parsePorts(test.input)
haveError := err != nil
if e, a := test.expectPortParseError, haveError; e != a {
t.Fatalf("%d: parsePorts: error expected=%t, got %t: %s", i, e, a, err)
}
// default to localhost
if len(test.addresses) == 0 && len(test.expectedAddresses) == 0 {
test.addresses = []string{"localhost"}
test.expectedAddresses = []listenAddress{{protocol: "tcp4", address: "127.0.0.1"}, {protocol: "tcp6", address: "::1"}}
}
// assert address parser
parsedAddresses, err := parseAddresses(test.addresses)
haveError = err != nil
if e, a := test.expectAddressParseError, haveError; e != a {
t.Fatalf("%d: parseAddresses: error expected=%t, got %t: %s", i, e, a, err)
}
dialer := &fakeDialer{}
expectedStopChan := make(chan struct{})
readyChan := make(chan struct{})
var pf *PortForwarder
if len(test.addresses) > 0 {
pf, err = NewOnAddresses(dialer, test.addresses, test.input, expectedStopChan, readyChan, os.Stdout, os.Stderr)
} else {
pf, err = New(dialer, test.input, expectedStopChan, readyChan, os.Stdout, os.Stderr)
}
haveError = err != nil
if e, a := test.expectNewError, haveError; e != a {
t.Fatalf("%d: New: error expected=%t, got %t: %s", i, e, a, err)
}
if test.expectPortParseError || test.expectAddressParseError || test.expectNewError {
continue
}
sort.Slice(test.expectedAddresses, func(i, j int) bool { return test.expectedAddresses[i].address < test.expectedAddresses[j].address })
sort.Slice(parsedAddresses, func(i, j int) bool { return parsedAddresses[i].address < parsedAddresses[j].address })
if !reflect.DeepEqual(test.expectedAddresses, parsedAddresses) {
t.Fatalf("%d: expectedAddresses: %v, got: %v", i, test.expectedAddresses, parsedAddresses)
}
for pi, expectedPort := range test.expectedPorts {
if e, a := expectedPort.Local, parsedPorts[pi].Local; e != a {
t.Fatalf("%d: local expected: %d, got: %d", i, e, a)
}
if e, a := expectedPort.Remote, parsedPorts[pi].Remote; e != a {
t.Fatalf("%d: remote expected: %d, got: %d", i, e, a)
}
}
if dialer.dialed {
t.Fatalf("%d: expected not dialed", i)
}
if _, portErr := pf.GetPorts(); portErr == nil {
t.Fatalf("%d: GetPorts: error expected but got nil", i)
}
// mock-signal the Ready channel
close(readyChan)
if ports, portErr := pf.GetPorts(); portErr != nil {
t.Fatalf("%d: GetPorts: unable to retrieve ports: %s", i, portErr)
} else if !reflect.DeepEqual(test.expectedPorts, ports) {
t.Fatalf("%d: ports: expected %#v, got %#v", i, test.expectedPorts, ports)
}
if e, a := expectedStopChan, pf.stopChan; e != a {
t.Fatalf("%d: stopChan: expected %#v, got %#v", i, e, a)
}
if pf.Ready == nil {
t.Fatalf("%d: Ready should be non-nil", i)
}
}
}
type GetListenerTestCase struct {
Hostname string
Protocol string
ShouldRaiseError bool
ExpectedListenerAddress string
}
func TestGetListener(t *testing.T) {
var pf PortForwarder
testCases := []GetListenerTestCase{
{
Hostname: "localhost",
Protocol: "tcp4",
ShouldRaiseError: false,
ExpectedListenerAddress: "127.0.0.1",
},
{
Hostname: "127.0.0.1",
Protocol: "tcp4",
ShouldRaiseError: false,
ExpectedListenerAddress: "127.0.0.1",
},
{
Hostname: "::1",
Protocol: "tcp6",
ShouldRaiseError: false,
ExpectedListenerAddress: "::1",
},
{
Hostname: "::1",
Protocol: "tcp4",
ShouldRaiseError: true,
},
{
Hostname: "127.0.0.1",
Protocol: "tcp6",
ShouldRaiseError: true,
},
}
for i, testCase := range testCases {
expectedListenerPort := "12345"
listener, err := pf.getListener(testCase.Protocol, testCase.Hostname, &ForwardedPort{12345, 12345})
if err != nil && strings.Contains(err.Error(), "cannot assign requested address") {
t.Logf("Can't test #%d: %v", i, err)
continue
}
errorRaised := err != nil
if testCase.ShouldRaiseError != errorRaised {
t.Errorf("Test case #%d failed: Data %v an error has been raised(%t) where it should not (or reciprocally): %v", i, testCase, testCase.ShouldRaiseError, err)
continue
}
if errorRaised {
continue
}
if listener == nil {
t.Errorf("Test case #%d did not raise an error but failed in initializing listener", i)
continue
}
host, port, _ := net.SplitHostPort(listener.Addr().String())
t.Logf("Asked a %s forward for: %s:%v, got listener %s:%s, expected: %s", testCase.Protocol, testCase.Hostname, 12345, host, port, expectedListenerPort)
if host != testCase.ExpectedListenerAddress {
t.Errorf("Test case #%d failed: Listener does not listen on expected address: asked '%v' got '%v'", i, testCase.ExpectedListenerAddress, host)
}
if port != expectedListenerPort {
t.Errorf("Test case #%d failed: Listener does not listen on expected port: asked %v got %v", i, expectedListenerPort, port)
}
listener.Close()
}
}

View File

@ -1,27 +0,0 @@
reviewers:
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
- caesarxuchao
- vishh
- mikedanese
- liggitt
- nikhiljindal
- erictune
- pmorie
- dchen1107
- saad-ali
- luxas
- yifan-gu
- eparis
- mwielgus
- timothysc
- jsafrane
- dims
- krousey
- a-robinson
- aveshagarwal
- resouer
- cjcullen

View File

@ -1,18 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package record has all client logic for recording and reporting events.
package record // import "k8s.io/client-go/tools/record"

View File

@ -1,322 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"fmt"
"math/rand"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
ref "k8s.io/client-go/tools/reference"
"net/http"
"k8s.io/klog"
)
const maxTriesPerEvent = 12
var defaultSleepDuration = 10 * time.Second
const maxQueuedEvents = 1000
// EventSink knows how to store events (client.Client implements it.)
// EventSink must respect the namespace that will be embedded in 'event'.
// It is assumed that EventSink will return the same sorts of errors as
// pkg/client's REST client.
type EventSink interface {
Create(event *v1.Event) (*v1.Event, error)
Update(event *v1.Event) (*v1.Event, error)
Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error)
}
// EventRecorder knows how to record events on behalf of an EventSource.
type EventRecorder interface {
// Event constructs an event from the given information and puts it in the queue for sending.
// 'object' is the object this event is about; Event will make a reference to it, or you may
// pass a reference to the object directly.
// 'eventtype' is the type of this event, which can be one of Normal or Warning. New types
// could be added in the future.
// 'reason' is the reason this event is generated. 'reason' should be short and unique; it
// should be in UpperCamelCase format (starting with a capital letter). 'reason' will be used
// to automate handling of events, so imagine people writing switch statements to handle them.
// You want to make that easy.
// 'message' is intended to be human readable.
//
// The resulting event will be created in the same namespace as the reference object.
Event(object runtime.Object, eventtype, reason, message string)
// Eventf is just like Event, but with Sprintf for the message field.
Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
// PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field.
PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{})
// AnnotatedEventf is just like Eventf, but with annotations attached
AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
}
// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
type EventBroadcaster interface {
// StartEventWatcher starts sending events received from this EventBroadcaster to the given
// event handler function. The return value can be ignored or used to stop recording, if
// desired.
StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface
// StartRecordingToSink starts sending events received from this EventBroadcaster to the given
// sink. The return value can be ignored or used to stop recording, if desired.
StartRecordingToSink(sink EventSink) watch.Interface
// StartLogging starts sending events received from this EventBroadcaster to the given logging
// function. The return value can be ignored or used to stop recording, if desired.
StartLogging(logf func(format string, args ...interface{})) watch.Interface
// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
// with the event source set to the given event source.
NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
}
// NewBroadcaster creates a new event broadcaster.
func NewBroadcaster() EventBroadcaster {
return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration}
}
func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster {
return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration}
}
type eventBroadcasterImpl struct {
*watch.Broadcaster
sleepDuration time.Duration
}
// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
// The return value can be ignored or used to stop recording, if desired.
// TODO: make me an object with parameterizable queue length and retry interval
func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface {
// The default math/rand package functions aren't thread safe, so create a
// new Rand object for each StartRecording call.
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
eventCorrelator := NewEventCorrelator(clock.RealClock{})
return eventBroadcaster.StartEventWatcher(
func(event *v1.Event) {
recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration)
})
}
func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) {
// Make a copy before modification, because there could be multiple listeners.
// Events are safe to copy like this.
eventCopy := *event
event = &eventCopy
result, err := eventCorrelator.EventCorrelate(event)
if err != nil {
utilruntime.HandleError(err)
}
if result.Skip {
return
}
tries := 0
for {
if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) {
break
}
tries++
if tries >= maxTriesPerEvent {
klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
break
}
// Randomize the first sleep so that various clients won't all be
// synced up if the master goes down.
if tries == 1 {
time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64()))
} else {
time.Sleep(sleepDuration)
}
}
}
func isKeyNotFoundError(err error) bool {
statusErr, _ := err.(*errors.StatusError)
if statusErr != nil && statusErr.Status().Code == http.StatusNotFound {
return true
}
return false
}
// recordEvent attempts to write event to a sink. It returns true if the event
// was successfully recorded or discarded, false if it should be retried.
// If updateExistingEvent is false, it creates a new event, otherwise it updates
// existing event.
func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool {
var newEvent *v1.Event
var err error
if updateExistingEvent {
newEvent, err = sink.Patch(event, patch)
}
// Update can fail because the event may have been removed and it no longer exists.
if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) {
// Making sure that ResourceVersion is empty on creation
event.ResourceVersion = ""
newEvent, err = sink.Create(event)
}
if err == nil {
// we need to update our event correlator with the server returned state to handle name/resourceversion
eventCorrelator.UpdateState(newEvent)
return true
}
// If we can't contact the server, then hold everything while we keep trying.
// Otherwise, something about the event is malformed and we should abandon it.
switch err.(type) {
case *restclient.RequestConstructionError:
// We will construct the request the same next time, so don't keep trying.
klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
return true
case *errors.StatusError:
if errors.IsAlreadyExists(err) {
klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
} else {
klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
}
return true
case *errors.UnexpectedObjectError:
// We don't expect this; it implies the server's response didn't match a
// known pattern. Go ahead and retry.
default:
// This case includes actual http transport errors. Go ahead and retry.
}
klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
return false
}
// StartLogging starts sending events received from this EventBroadcaster to the given logging function.
// The return value can be ignored or used to stop recording, if desired.
func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface {
return eventBroadcaster.StartEventWatcher(
func(e *v1.Event) {
logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message)
})
}
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value can be ignored or used to stop recording, if desired.
func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface {
watcher := eventBroadcaster.Watch()
go func() {
defer utilruntime.HandleCrash()
for watchEvent := range watcher.ResultChan() {
event, ok := watchEvent.Object.(*v1.Event)
if !ok {
// This is all local, so there's no reason this should
// ever happen.
continue
}
eventHandler(event)
}
}()
return watcher
}
// NewRecorder returns an EventRecorder that records events with the given event source.
func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder {
return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}}
}
type recorderImpl struct {
scheme *runtime.Scheme
source v1.EventSource
*watch.Broadcaster
clock clock.Clock
}
func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) {
ref, err := ref.GetReference(recorder.scheme, object)
if err != nil {
klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
return
}
if !validateEventType(eventtype) {
klog.Errorf("Unsupported event type: '%v'", eventtype)
return
}
event := recorder.makeEvent(ref, annotations, eventtype, reason, message)
event.Source = recorder.source
go func() {
// NOTE: events should be a non-blocking operation
defer utilruntime.HandleCrash()
recorder.Action(watch.Added, event)
}()
}
func validateEventType(eventtype string) bool {
switch eventtype {
case v1.EventTypeNormal, v1.EventTypeWarning:
return true
}
return false
}
func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message)
}
func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...))
}
func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event {
t := metav1.Time{Time: recorder.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
namespace = metav1.NamespaceDefault
}
return &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace,
Annotations: annotations,
},
InvolvedObject: *ref,
Reason: reason,
Message: message,
FirstTimestamp: t,
LastTimestamp: t,
Count: 1,
Type: eventtype,
}
}
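
To show how the pieces in this file fit together, a brief sketch of the standard broadcaster wiring follows; the clientset and the "my-controller" component name are assumptions, and EventSinkImpl comes from the typed core/v1 client rather than this package.

// Hypothetical broadcaster setup; cs is an assumed kubernetes.Interface.
package main

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
)

func newRecorder(cs kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	// fan incoming events out to the log and to the API server sink
	broadcaster.StartLogging(klog.Infof)
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: cs.CoreV1().Events(""), // the sink respects each event's own namespace
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "my-controller"})
}

A controller would then call recorder.Eventf(obj, v1.EventTypeNormal, "Started", "..."), and recordToSink above applies correlation, spam filtering, and the retry loop before the event reaches the sink.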

View File

@ -1,924 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"encoding/json"
"fmt"
"math/rand"
"net/http"
"strconv"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
ref "k8s.io/client-go/tools/reference"
)
type testEventSink struct {
OnCreate func(e *v1.Event) (*v1.Event, error)
OnUpdate func(e *v1.Event) (*v1.Event, error)
OnPatch func(e *v1.Event, p []byte) (*v1.Event, error)
}
// CreateEvent records the event for testing.
func (t *testEventSink) Create(e *v1.Event) (*v1.Event, error) {
if t.OnCreate != nil {
return t.OnCreate(e)
}
return e, nil
}
// UpdateEvent records the event for testing.
func (t *testEventSink) Update(e *v1.Event) (*v1.Event, error) {
if t.OnUpdate != nil {
return t.OnUpdate(e)
}
return e, nil
}
// PatchEvent records the event for testing.
func (t *testEventSink) Patch(e *v1.Event, p []byte) (*v1.Event, error) {
if t.OnPatch != nil {
return t.OnPatch(e, p)
}
return e, nil
}
type OnCreateFunc func(*v1.Event) (*v1.Event, error)
func OnCreateFactory(testCache map[string]*v1.Event, createEvent chan<- *v1.Event) OnCreateFunc {
return func(event *v1.Event) (*v1.Event, error) {
testCache[getEventKey(event)] = event
createEvent <- event
return event, nil
}
}
type OnPatchFunc func(*v1.Event, []byte) (*v1.Event, error)
func OnPatchFactory(testCache map[string]*v1.Event, patchEvent chan<- *v1.Event) OnPatchFunc {
return func(event *v1.Event, patch []byte) (*v1.Event, error) {
cachedEvent, found := testCache[getEventKey(event)]
if !found {
return nil, fmt.Errorf("unexpected error: couldn't find Event in testCache.")
}
originalData, err := json.Marshal(cachedEvent)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
patched, err := strategicpatch.StrategicMergePatch(originalData, patch, event)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
patchedObj := &v1.Event{}
err = json.Unmarshal(patched, patchedObj)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
patchEvent <- patchedObj
return patchedObj, nil
}
}
func TestEventf(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "bar",
},
}
testPod2 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
},
}
testRef, err := ref.GetPartialReference(scheme.Scheme, testPod, "spec.containers[2]")
if err != nil {
t.Fatal(err)
}
testRef2, err := ref.GetPartialReference(scheme.Scheme, testPod2, "spec.containers[3]")
if err != nil {
t.Fatal(err)
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
expect *v1.Event
expectLog string
expectUpdate bool
}{
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testPod,
eventtype: v1.EventTypeNormal,
reason: "Killed",
messageFmt: "some other verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
},
Reason: "Killed",
Message: "some other verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 3,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: true,
},
}
testCache := map[string]*v1.Event{}
logCalled := make(chan struct{})
createEvent := make(chan *v1.Event)
updateEvent := make(chan *v1.Event)
patchEvent := make(chan *v1.Event)
testEvents := testEventSink{
OnCreate: OnCreateFactory(testCache, createEvent),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache, patchEvent),
}
eventBroadcaster := NewBroadcasterForTests(0)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
clock := clock.NewFakeClock(time.Now())
recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
for index, item := range table {
clock.Step(1 * time.Second)
logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
t.Errorf("Expected '%v', got '%v'", e, a)
}
logCalled <- struct{}{}
})
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
<-logCalled
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
logWatcher.Stop()
}
sinkWatcher.Stop()
}
func recorderWithFakeClock(eventSource v1.EventSource, eventBroadcaster EventBroadcaster, clock clock.Clock) EventRecorder {
return &recorderImpl{scheme.Scheme, eventSource, eventBroadcaster.(*eventBroadcasterImpl).Broadcaster, clock}
}
func TestWriteEventError(t *testing.T) {
type entry struct {
timesToSendError int
attemptsWanted int
err error
}
table := map[string]*entry{
"giveUp1": {
timesToSendError: 1000,
attemptsWanted: 1,
err: &restclient.RequestConstructionError{},
},
"giveUp2": {
timesToSendError: 1000,
attemptsWanted: 1,
err: &errors.StatusError{},
},
"retry1": {
timesToSendError: 1000,
attemptsWanted: 12,
err: &errors.UnexpectedObjectError{},
},
"retry2": {
timesToSendError: 1000,
attemptsWanted: 12,
err: fmt.Errorf("A weird error"),
},
"succeedEventually": {
timesToSendError: 2,
attemptsWanted: 2,
err: fmt.Errorf("A weird error"),
},
}
clock := clock.IntervalClock{Time: time.Now(), Duration: time.Second}
eventCorrelator := NewEventCorrelator(&clock)
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
for caseName, ent := range table {
attempts := 0
sink := &testEventSink{
OnCreate: func(event *v1.Event) (*v1.Event, error) {
attempts++
if attempts < ent.timesToSendError {
return nil, ent.err
}
return event, nil
},
}
ev := &v1.Event{}
recordToSink(sink, ev, eventCorrelator, randGen, 0)
if attempts != ent.attemptsWanted {
t.Errorf("case %v: wanted %d, got %d attempts", caseName, ent.attemptsWanted, attempts)
}
}
}
func TestUpdateExpiredEvent(t *testing.T) {
clock := clock.IntervalClock{Time: time.Now(), Duration: time.Second}
eventCorrelator := NewEventCorrelator(&clock)
randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
var createdEvent *v1.Event
sink := &testEventSink{
OnPatch: func(*v1.Event, []byte) (*v1.Event, error) {
return nil, &errors.StatusError{
ErrStatus: metav1.Status{
Code: http.StatusNotFound,
Reason: metav1.StatusReasonNotFound,
}}
},
OnCreate: func(event *v1.Event) (*v1.Event, error) {
createdEvent = event
return event, nil
},
}
ev := &v1.Event{}
ev.ResourceVersion = "updated-resource-version"
ev.Count = 2
recordToSink(sink, ev, eventCorrelator, randGen, 0)
if createdEvent == nil {
t.Error("Event did not get created after patch failed")
return
}
if createdEvent.ResourceVersion != "" {
t.Errorf("Event did not have its resource version cleared, was %s", createdEvent.ResourceVersion)
}
}
func TestLotsOfEvents(t *testing.T) {
recorderCalled := make(chan struct{})
loggerCalled := make(chan struct{})
// Fail each event a few times to ensure there's some load on the tested code.
var counts [1000]int
testEvents := testEventSink{
OnCreate: func(event *v1.Event) (*v1.Event, error) {
num, err := strconv.Atoi(event.Message)
if err != nil {
t.Error(err)
return event, nil
}
counts[num]++
if counts[num] < 5 {
return nil, fmt.Errorf("fake error")
}
recorderCalled <- struct{}{}
return event, nil
},
}
eventBroadcaster := NewBroadcasterForTests(0)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
loggerCalled <- struct{}{}
})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "eventTest"})
for i := 0; i < maxQueuedEvents; i++ {
// we want a unique object to stop spam filtering
ref := &v1.ObjectReference{
Kind: "Pod",
Name: fmt.Sprintf("foo-%v", i),
Namespace: "baz",
UID: "bar",
APIVersion: "version",
}
// we need to vary the reason to prevent aggregation
go recorder.Eventf(ref, v1.EventTypeNormal, "Reason-"+strconv.Itoa(i), strconv.Itoa(i))
}
// Make sure no events were dropped by either of the listeners.
for i := 0; i < maxQueuedEvents; i++ {
<-recorderCalled
<-loggerCalled
}
// Make sure that every event was attempted 5 times
for i := 0; i < maxQueuedEvents; i++ {
if counts[i] < 5 {
t.Errorf("Only attempted to record event '%d' %d times.", i, counts[i])
}
}
sinkWatcher.Stop()
logWatcher.Stop()
}
func TestEventfNoNamespace(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
UID: "bar",
},
}
testRef, err := ref.GetPartialReference(scheme.Scheme, testPod, "spec.containers[2]")
if err != nil {
t.Fatal(err)
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
expect *v1.Event
expectLog string
expectUpdate bool
}{
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "default",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
}
testCache := map[string]*v1.Event{}
logCalled := make(chan struct{})
createEvent := make(chan *v1.Event)
updateEvent := make(chan *v1.Event)
patchEvent := make(chan *v1.Event)
testEvents := testEventSink{
OnCreate: OnCreateFactory(testCache, createEvent),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache, patchEvent),
}
eventBroadcaster := NewBroadcasterForTests(0)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
clock := clock.NewFakeClock(time.Now())
recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
for index, item := range table {
clock.Step(1 * time.Second)
logWatcher := eventBroadcaster.StartLogging(func(formatter string, args ...interface{}) {
if e, a := item.expectLog, fmt.Sprintf(formatter, args...); e != a {
t.Errorf("Expected '%v', got '%v'", e, a)
}
logCalled <- struct{}{}
})
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
<-logCalled
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
logWatcher.Stop()
}
sinkWatcher.Stop()
}
func TestMultiSinkCache(t *testing.T) {
testPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "bar",
},
}
testPod2 := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
SelfLink: "/api/version/pods/foo",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
},
}
testRef, err := ref.GetPartialReference(scheme.Scheme, testPod, "spec.containers[2]")
if err != nil {
t.Fatal(err)
}
testRef2, err := ref.GetPartialReference(scheme.Scheme, testPod2, "spec.containers[3]")
if err != nil {
t.Fatal(err)
}
table := []struct {
obj k8sruntime.Object
eventtype string
reason string
messageFmt string
elements []interface{}
expect *v1.Event
expectLog string
expectUpdate bool
}{
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testPod,
eventtype: v1.EventTypeNormal,
reason: "Killed",
messageFmt: "some other verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
},
Reason: "Killed",
Message: "some other verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'Killed' some other verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef,
eventtype: v1.EventTypeNormal,
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "bar",
APIVersion: "version",
FieldPath: "spec.containers[2]",
},
Reason: "Started",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 3,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[2]"}): type: 'Normal' reason: 'Started' some verbose message: 1`,
expectUpdate: true,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 1,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: false,
},
{
obj: testRef2,
eventtype: v1.EventTypeNormal,
reason: "Stopped",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
expect: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: "baz",
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "foo",
Namespace: "baz",
UID: "differentUid",
APIVersion: "version",
FieldPath: "spec.containers[3]",
},
Reason: "Stopped",
Message: "some verbose message: 1",
Source: v1.EventSource{Component: "eventTest"},
Count: 2,
Type: v1.EventTypeNormal,
},
expectLog: `Event(v1.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"differentUid", APIVersion:"version", ResourceVersion:"", FieldPath:"spec.containers[3]"}): type: 'Normal' reason: 'Stopped' some verbose message: 1`,
expectUpdate: true,
},
}
testCache := map[string]*v1.Event{}
createEvent := make(chan *v1.Event)
updateEvent := make(chan *v1.Event)
patchEvent := make(chan *v1.Event)
testEvents := testEventSink{
OnCreate: OnCreateFactory(testCache, createEvent),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache, patchEvent),
}
testCache2 := map[string]*v1.Event{}
createEvent2 := make(chan *v1.Event)
updateEvent2 := make(chan *v1.Event)
patchEvent2 := make(chan *v1.Event)
testEvents2 := testEventSink{
OnCreate: OnCreateFactory(testCache2, createEvent2),
OnUpdate: func(event *v1.Event) (*v1.Event, error) {
updateEvent2 <- event
return event, nil
},
OnPatch: OnPatchFactory(testCache2, patchEvent2),
}
eventBroadcaster := NewBroadcasterForTests(0)
clock := clock.NewFakeClock(time.Now())
recorder := recorderWithFakeClock(v1.EventSource{Component: "eventTest"}, eventBroadcaster, clock)
sinkWatcher := eventBroadcaster.StartRecordingToSink(&testEvents)
for index, item := range table {
clock.Step(1 * time.Second)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
}
// Another StartRecordingToSink call should start recording events with a new, clean cache.
sinkWatcher2 := eventBroadcaster.StartRecordingToSink(&testEvents2)
for index, item := range table {
clock.Step(1 * time.Second)
recorder.Eventf(item.obj, item.eventtype, item.reason, item.messageFmt, item.elements...)
// validate event
if item.expectUpdate {
actualEvent := <-patchEvent2
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
} else {
actualEvent := <-createEvent2
validateEvent(strconv.Itoa(index), actualEvent, item.expect, t)
}
}
sinkWatcher.Stop()
sinkWatcher2.Stop()
}

View File

@ -1,462 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"encoding/json"
"fmt"
"strings"
"sync"
"time"
"github.com/golang/groupcache/lru"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/util/flowcontrol"
)
const (
maxLruCacheEntries = 4096
// if we see the same event that varies only by message
// more than 10 times in a 10 minute period, aggregate the event
defaultAggregateMaxEvents = 10
defaultAggregateIntervalInSeconds = 600
// by default, allow a source to send 25 events about an object
// but control the refill rate to 1 new event every 5 minutes
// this helps control the long-tail of events for things that are always
// unhealthy
defaultSpamBurst = 25
defaultSpamQPS = 1. / 300.
)
// getEventKey builds unique event key based on source, involvedObject, reason, message
func getEventKey(event *v1.Event) string {
return strings.Join([]string{
event.Source.Component,
event.Source.Host,
event.InvolvedObject.Kind,
event.InvolvedObject.Namespace,
event.InvolvedObject.Name,
event.InvolvedObject.FieldPath,
string(event.InvolvedObject.UID),
event.InvolvedObject.APIVersion,
event.Type,
event.Reason,
event.Message,
},
"")
}
// getSpamKey builds unique event key based on source, involvedObject
func getSpamKey(event *v1.Event) string {
return strings.Join([]string{
event.Source.Component,
event.Source.Host,
event.InvolvedObject.Kind,
event.InvolvedObject.Namespace,
event.InvolvedObject.Name,
string(event.InvolvedObject.UID),
event.InvolvedObject.APIVersion,
},
"")
}
// EventFilterFunc is a function that returns true if the event should be skipped
type EventFilterFunc func(event *v1.Event) bool
// EventSourceObjectSpamFilter is responsible for throttling
// the amount of events a source and object can produce.
type EventSourceObjectSpamFilter struct {
sync.RWMutex
// the cache that manages last synced state
cache *lru.Cache
// burst is the amount of events we allow per source + object
burst int
// qps is the refill rate of the token bucket in queries per second
qps float32
// clock is used to allow for testing over a time interval
clock clock.Clock
}
// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill.
func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter {
return &EventSourceObjectSpamFilter{
cache: lru.New(lruCacheSize),
burst: burst,
qps: qps,
clock: clock,
}
}
// spamRecord holds data used to perform spam filtering decisions.
type spamRecord struct {
// rateLimiter controls the rate of events about this object
rateLimiter flowcontrol.RateLimiter
}
// Filter checks whether a given source+object has exceeded its allowed event rate, returning true when the event should be skipped.
func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool {
var record spamRecord
// controls our cached information about this event (source+object)
eventKey := getSpamKey(event)
// do we have a record of similar events in our cache?
f.Lock()
defer f.Unlock()
value, found := f.cache.Get(eventKey)
if found {
record = value.(spamRecord)
}
// verify we have a rate limiter for this record
if record.rateLimiter == nil {
record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock)
}
// ensure we have available rate
filter := !record.rateLimiter.TryAccept()
// update the cache
f.cache.Add(eventKey, record)
return filter
}
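
// exampleSpamFilterBurst is an illustrative sketch, not part of the original
// file: it shows the intended Filter behavior under the default settings. The
// fake clock and the event below are hypothetical test values.
func exampleSpamFilterBurst() bool {
	fakeClock := clock.NewFakeClock(time.Now())
	f := NewEventSourceObjectSpamFilter(maxLruCacheEntries, defaultSpamBurst, defaultSpamQPS, fakeClock)
	ev := &v1.Event{InvolvedObject: v1.ObjectReference{Kind: "Pod", Name: "p", Namespace: "ns"}}
	for i := 0; i < defaultSpamBurst; i++ {
		_ = f.Filter(ev) // false: still within the 25-event burst budget
	}
	return f.Filter(ev) // true: throttled until tokens refill at ~1 per 5 minutes
}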
// EventAggregatorKeyFunc is responsible for grouping events for aggregation
// It returns a tuple of the following:
// aggregateKey - key that identifies the aggregate group to bucket this event
// localKey - key that makes this event unique within the aggregate group
type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string)
// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason
func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
return strings.Join([]string{
event.Source.Component,
event.Source.Host,
event.InvolvedObject.Kind,
event.InvolvedObject.Namespace,
event.InvolvedObject.Name,
string(event.InvolvedObject.UID),
event.InvolvedObject.APIVersion,
event.Type,
event.Reason,
},
""), event.Message
}
// EventAggregatorMessageFunc is responsible for producing an aggregation message
type EventAggregatorMessageFunc func(event *v1.Event) string
// EventAggregatorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
return "(combined from similar events): " + event.Message
}
// EventAggregator identifies similar events and aggregates them into a single event
type EventAggregator struct {
sync.RWMutex
// The cache that manages aggregation state
cache *lru.Cache
// The function that groups events for aggregation
keyFunc EventAggregatorKeyFunc
// The function that generates a message for an aggregate event
messageFunc EventAggregatorMessageFunc
// The maximum number of events in the specified interval before aggregation occurs
maxEvents uint
// The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new
maxIntervalInSeconds uint
// clock is used to allow for testing over a time interval
clock clock.Clock
}
// NewEventAggregator returns a new instance of an EventAggregator
func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc,
maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator {
return &EventAggregator{
cache: lru.New(lruCacheSize),
keyFunc: keyFunc,
messageFunc: messageFunc,
maxEvents: uint(maxEvents),
maxIntervalInSeconds: uint(maxIntervalInSeconds),
clock: clock,
}
}
// aggregateRecord holds data used to perform aggregation decisions
type aggregateRecord struct {
// we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate
// if the size of this set exceeds the max, we know we need to aggregate
localKeys sets.String
// The last time at which the aggregate was recorded
lastTimestamp metav1.Time
}
// EventAggregate checks if a similar event has been seen according to the
// aggregation configuration (max events, max interval, etc) and returns:
//
// - The (potentially modified) event that should be created
// - The cache key for the event, for correlation purposes. This will be set to
// the full event key for normal events, and to the aggregate key (produced by
// EventAggregatorKeyFunc) for aggregate events.
func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
now := metav1.NewTime(e.clock.Now())
var record aggregateRecord
// eventKey is the full cache key for this event
eventKey := getEventKey(newEvent)
// aggregateKey is for the aggregate event, if one is needed.
aggregateKey, localKey := e.keyFunc(newEvent)
// Do we have a record of similar events in our cache?
e.Lock()
defer e.Unlock()
value, found := e.cache.Get(aggregateKey)
if found {
record = value.(aggregateRecord)
}
// Is the previous record too old? If so, make a fresh one. Note: if we didn't
// find a similar record, its lastTimestamp will be the zero value, so we
// create a new one in that case.
maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
interval := now.Time.Sub(record.lastTimestamp.Time)
if interval > maxInterval {
record = aggregateRecord{localKeys: sets.NewString()}
}
// Write the new event into the aggregation record and put it on the cache
record.localKeys.Insert(localKey)
record.lastTimestamp = now
e.cache.Add(aggregateKey, record)
// If we are not yet over the threshold for unique events, don't correlate them
if uint(record.localKeys.Len()) < e.maxEvents {
return newEvent, eventKey
}
// do not grow our local key set any larger than max
record.localKeys.PopAny()
// create a new aggregate event, and return the aggregateKey as the cache key
// (so that it can be overwritten.)
eventCopy := &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()),
Namespace: newEvent.Namespace,
},
Count: 1,
FirstTimestamp: now,
InvolvedObject: newEvent.InvolvedObject,
LastTimestamp: now,
Message: e.messageFunc(newEvent),
Type: newEvent.Type,
Reason: newEvent.Reason,
Source: newEvent.Source,
}
return eventCopy, aggregateKey
}
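
// exampleAggregateThreshold is an illustrative sketch, not part of the
// original file: after defaultAggregateMaxEvents similar events (same reason,
// differing only by message) arrive inside the interval, EventAggregate starts
// returning a combined event keyed by the aggregate key. Values are hypothetical.
func exampleAggregateThreshold(agg *EventAggregator) (*v1.Event, string) {
	var out *v1.Event
	var key string
	for i := 0; i <= defaultAggregateMaxEvents; i++ {
		ev := &v1.Event{
			InvolvedObject: v1.ObjectReference{Kind: "Pod", Name: "p", Namespace: "ns"},
			Reason:         "FailedScheduling",
			Message:        fmt.Sprintf("attempt %d", i), // only the message varies
			Type:           v1.EventTypeWarning,
		}
		out, key = agg.EventAggregate(ev)
	}
	// out.Message now starts with "(combined from similar events): " and key is
	// the aggregate key, so later duplicates overwrite the same cache entry.
	return out, key
}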
// eventLog records data about when an event was observed
type eventLog struct {
// The number of times the event has occurred since first occurrence.
count uint
// The time at which the event was first recorded.
firstTimestamp metav1.Time
// The unique name of the first occurrence of this event
name string
// Resource version returned from previous interaction with server
resourceVersion string
}
// eventLogger logs occurrences of an event
type eventLogger struct {
sync.RWMutex
cache *lru.Cache
clock clock.Clock
}
// newEventLogger observes events and counts their frequencies
func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger {
return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock}
}
// eventObserve records an event, or updates an existing one if key is a cache hit
func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) {
var (
patch []byte
err error
)
eventCopy := *newEvent
event := &eventCopy
e.Lock()
defer e.Unlock()
// Check if there is an existing event we should update
lastObservation := e.lastEventObservationFromCache(key)
// If we found a result, prepare a patch
if lastObservation.count > 0 {
// update the event based on the last observation so patch will work as desired
event.Name = lastObservation.name
event.ResourceVersion = lastObservation.resourceVersion
event.FirstTimestamp = lastObservation.firstTimestamp
event.Count = int32(lastObservation.count) + 1
eventCopy2 := *event
eventCopy2.Count = 0
eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0))
eventCopy2.Message = ""
newData, _ := json.Marshal(event)
oldData, _ := json.Marshal(eventCopy2)
patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event)
}
// record our new observation
e.cache.Add(
key,
eventLog{
count: uint(event.Count),
firstTimestamp: event.FirstTimestamp,
name: event.Name,
resourceVersion: event.ResourceVersion,
},
)
return event, patch, err
}
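
// exampleDuplicateObservation is an illustrative sketch, not part of the
// original file: the first sighting of a key yields no patch (Create path),
// while a duplicate yields a strategic merge patch that bumps Count. The
// event below is a hypothetical value.
func exampleDuplicateObservation(e *eventLogger) ([]byte, error) {
	ev := &v1.Event{Count: 1, Message: "pulled image", Reason: "Pulled"}
	key := getEventKey(ev)
	e.eventObserve(ev, key)                  // first observation: patch is nil
	_, patch, err := e.eventObserve(ev, key) // duplicate: patch sets Count to 2
	return patch, err
}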
// updateState updates its internal tracking information based on latest server state
func (e *eventLogger) updateState(event *v1.Event) {
key := getEventKey(event)
e.Lock()
defer e.Unlock()
// record our new observation
e.cache.Add(
key,
eventLog{
count: uint(event.Count),
firstTimestamp: event.FirstTimestamp,
name: event.Name,
resourceVersion: event.ResourceVersion,
},
)
}
// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock
func (e *eventLogger) lastEventObservationFromCache(key string) eventLog {
value, ok := e.cache.Get(key)
if ok {
observationValue, ok := value.(eventLog)
if ok {
return observationValue
}
}
return eventLog{}
}
// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can decide
// whether an event should be dropped from further processing. It can aggregate similar events that occur
// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication
// to ensure events that are observed multiple times are compacted into a single event with increasing counts.
type EventCorrelator struct {
// the function to filter the event
filterFunc EventFilterFunc
// the object that performs event aggregation
aggregator *EventAggregator
// the object that observes events as they come through
logger *eventLogger
}
// EventCorrelateResult is the result of a Correlate
type EventCorrelateResult struct {
// the event after correlation
Event *v1.Event
// if provided, perform a strategic patch when updating the record on the server
Patch []byte
// if true, do no further processing of the event
Skip bool
}
// NewEventCorrelator returns an EventCorrelator configured with default values.
//
// The EventCorrelator is responsible for event filtering, aggregating, and counting
// prior to interacting with the API server to record the event.
//
// The default behavior is as follows:
// * Aggregation is performed if a similar event is recorded 10 times
// in a 10 minute rolling interval. A similar event is an event that varies only by
// the Event.Message field. Rather than recording the precise event, aggregation
// will create a new event whose message reports that it has combined events with
// the same reason.
// * Events are incrementally counted if the exact same event is encountered multiple
// times.
// * A source may burst 25 events about an object, but has a refill rate budget
// per object of 1 event every 5 minutes to control the long tail of spam.
func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
cacheSize := maxLruCacheEntries
spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock)
return &EventCorrelator{
filterFunc: spamFilter.Filter,
aggregator: NewEventAggregator(
cacheSize,
EventAggregatorByReasonFunc,
EventAggregatorByReasonMessageFunc,
defaultAggregateMaxEvents,
defaultAggregateIntervalInSeconds,
clock),
logger: newEventLogger(cacheSize, clock),
}
}
// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) {
if newEvent == nil {
return nil, fmt.Errorf("event is nil")
}
aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent)
observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey)
if c.filterFunc(observedEvent) {
return &EventCorrelateResult{Skip: true}, nil
}
return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err
}
// UpdateState based on the latest observed state from server
func (c *EventCorrelator) UpdateState(event *v1.Event) {
c.logger.updateState(event)
}
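
// exampleCorrelatorLoop is an illustrative sketch, not part of the original
// file: the intended call pattern for a sink is to correlate each incoming
// event, skip it when filtered, and otherwise record it (Create or Patch) and
// feed the recorded state back through UpdateState.
func exampleCorrelatorLoop(c *EventCorrelator, incoming <-chan *v1.Event) {
	for ev := range incoming {
		result, err := c.EventCorrelate(ev)
		if err != nil || result.Skip {
			continue // spam-filtered (or nil event): nothing to record
		}
		// A real sink would send result.Event to the server here, using
		// result.Patch for updates, and pass the server's response along.
		c.UpdateState(result.Event)
	}
}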

View File

@ -1,279 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"reflect"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
)
func makeObjectReference(kind, name, namespace string) v1.ObjectReference {
return v1.ObjectReference{
Kind: kind,
Name: name,
Namespace: namespace,
UID: "C934D34AFB20242",
APIVersion: "version",
FieldPath: "spec.containers{mycontainer}",
}
}
func makeEvent(reason, message string, involvedObject v1.ObjectReference) v1.Event {
eventTime := metav1.Now()
event := v1.Event{
Reason: reason,
Message: message,
InvolvedObject: involvedObject,
Source: v1.EventSource{
Component: "kubelet",
Host: "kublet.node1",
},
Count: 1,
FirstTimestamp: eventTime,
LastTimestamp: eventTime,
Type: v1.EventTypeNormal,
}
return event
}
func makeEvents(num int, template v1.Event) []v1.Event {
events := []v1.Event{}
for i := 0; i < num; i++ {
events = append(events, template)
}
return events
}
func makeUniqueEvents(num int) []v1.Event {
events := []v1.Event{}
kind := "Pod"
for i := 0; i < num; i++ {
reason := strings.Join([]string{"reason", strconv.Itoa(i)}, "-")
message := strings.Join([]string{"message", strconv.Itoa(i)}, "-")
name := strings.Join([]string{"pod", strconv.Itoa(i)}, "-")
namespace := strings.Join([]string{"ns", strconv.Itoa(i)}, "-")
involvedObject := makeObjectReference(kind, name, namespace)
events = append(events, makeEvent(reason, message, involvedObject))
}
return events
}
func makeSimilarEvents(num int, template v1.Event, messagePrefix string) []v1.Event {
events := makeEvents(num, template)
for i := range events {
events[i].Message = strings.Join([]string{messagePrefix, strconv.Itoa(i), events[i].Message}, "-")
}
return events
}
func setCount(event v1.Event, count int) v1.Event {
event.Count = int32(count)
return event
}
func validateEvent(messagePrefix string, actualEvent *v1.Event, expectedEvent *v1.Event, t *testing.T) (*v1.Event, error) {
recvEvent := *actualEvent
expectCompression := expectedEvent.Count > 1
t.Logf("%v - expectedEvent.Count is %d\n", messagePrefix, expectedEvent.Count)
// Just check that the timestamp was set.
if recvEvent.FirstTimestamp.IsZero() || recvEvent.LastTimestamp.IsZero() {
t.Errorf("%v - timestamp wasn't set: %#v", messagePrefix, recvEvent)
}
actualFirstTimestamp := recvEvent.FirstTimestamp
actualLastTimestamp := recvEvent.LastTimestamp
if actualFirstTimestamp.Equal(&actualLastTimestamp) {
if expectCompression {
t.Errorf("%v - FirstTimestamp (%q) and LastTimestamp (%q) must be different to indicate event compression happened, but were the same. Actual Event: %#v", messagePrefix, actualFirstTimestamp, actualLastTimestamp, recvEvent)
}
} else {
if expectedEvent.Count == 1 {
t.Errorf("%v - FirstTimestamp (%q) and LastTimestamp (%q) must be equal to indicate only one occurrence of the event, but were different. Actual Event: %#v", messagePrefix, actualFirstTimestamp, actualLastTimestamp, recvEvent)
}
}
// Temporarily clear timestamps before comparing, since their actual values don't matter.
recvEvent.FirstTimestamp = expectedEvent.FirstTimestamp
recvEvent.LastTimestamp = expectedEvent.LastTimestamp
// Check that name has the right prefix.
if n, en := recvEvent.Name, expectedEvent.Name; !strings.HasPrefix(n, en) {
t.Errorf("%v - Name '%v' does not contain prefix '%v'", messagePrefix, n, en)
}
recvEvent.Name = expectedEvent.Name
if e, a := expectedEvent, &recvEvent; !reflect.DeepEqual(e, a) {
t.Errorf("%v - diff: %s", messagePrefix, diff.ObjectGoPrintDiff(e, a))
}
recvEvent.FirstTimestamp = actualFirstTimestamp
recvEvent.LastTimestamp = actualLastTimestamp
return actualEvent, nil
}
// TestEventAggregatorByReasonFunc ensures that two events are aggregated if they vary only by event.message
func TestEventAggregatorByReasonFunc(t *testing.T) {
event1 := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other"))
event2 := makeEvent("end-of-world", "it was awful", makeObjectReference("Pod", "pod1", "other"))
event3 := makeEvent("nevermind", "it was a bug", makeObjectReference("Pod", "pod1", "other"))
aggKey1, localKey1 := EventAggregatorByReasonFunc(&event1)
aggKey2, localKey2 := EventAggregatorByReasonFunc(&event2)
aggKey3, _ := EventAggregatorByReasonFunc(&event3)
if aggKey1 != aggKey2 {
t.Errorf("Expected %v equal %v", aggKey1, aggKey2)
}
if localKey1 == localKey2 {
t.Errorf("Expected %v to not equal %v", aggKey1, aggKey3)
}
if aggKey1 == aggKey3 {
t.Errorf("Expected %v to not equal %v", aggKey1, aggKey3)
}
}
// TestEventAggregatorByReasonMessageFunc validates the proper output for an aggregate message
func TestEventAggregatorByReasonMessageFunc(t *testing.T) {
expectedPrefix := "(combined from similar events): "
event1 := makeEvent("end-of-world", "it was fun", makeObjectReference("Pod", "pod1", "other"))
actual := EventAggregatorByReasonMessageFunc(&event1)
if !strings.HasPrefix(actual, expectedPrefix) {
t.Errorf("Expected %v to begin with prefix %v", actual, expectedPrefix)
}
}
// TestEventCorrelator validates proper counting and aggregation of events
func TestEventCorrelator(t *testing.T) {
firstEvent := makeEvent("first", "i am first", makeObjectReference("Pod", "my-pod", "my-ns"))
duplicateEvent := makeEvent("duplicate", "me again", makeObjectReference("Pod", "my-pod", "my-ns"))
uniqueEvent := makeEvent("unique", "snowflake", makeObjectReference("Pod", "my-pod", "my-ns"))
similarEvent := makeEvent("similar", "similar message", makeObjectReference("Pod", "my-pod", "my-ns"))
similarEvent.InvolvedObject.FieldPath = "spec.containers{container1}"
aggregateEvent := makeEvent(similarEvent.Reason, EventAggregatorByReasonMessageFunc(&similarEvent), similarEvent.InvolvedObject)
similarButDifferentContainerEvent := similarEvent
similarButDifferentContainerEvent.InvolvedObject.FieldPath = "spec.containers{container2}"
scenario := map[string]struct {
previousEvents []v1.Event
newEvent v1.Event
expectedEvent v1.Event
intervalSeconds int
expectedSkip bool
}{
"create-a-single-event": {
previousEvents: []v1.Event{},
newEvent: firstEvent,
expectedEvent: setCount(firstEvent, 1),
intervalSeconds: 5,
},
"the-same-event-should-just-count": {
previousEvents: makeEvents(1, duplicateEvent),
newEvent: duplicateEvent,
expectedEvent: setCount(duplicateEvent, 2),
intervalSeconds: 5,
},
"the-same-event-should-just-count-even-if-more-than-aggregate": {
previousEvents: makeEvents(defaultAggregateMaxEvents, duplicateEvent),
newEvent: duplicateEvent,
expectedEvent: setCount(duplicateEvent, defaultAggregateMaxEvents+1),
intervalSeconds: 30, // larger interval induces aggregation but not spam.
},
"the-same-event-is-spam-if-happens-too-frequently": {
previousEvents: makeEvents(defaultSpamBurst+1, duplicateEvent),
newEvent: duplicateEvent,
expectedSkip: true,
intervalSeconds: 1,
},
"create-many-unique-events": {
previousEvents: makeUniqueEvents(30),
newEvent: uniqueEvent,
expectedEvent: setCount(uniqueEvent, 1),
intervalSeconds: 5,
},
"similar-events-should-aggregate-event": {
previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message),
newEvent: similarEvent,
expectedEvent: setCount(aggregateEvent, 1),
intervalSeconds: 5,
},
"similar-events-many-times-should-count-the-aggregate": {
previousEvents: makeSimilarEvents(defaultAggregateMaxEvents, similarEvent, similarEvent.Message),
newEvent: similarEvent,
expectedEvent: setCount(aggregateEvent, 2),
intervalSeconds: 5,
},
"events-from-different-containers-do-not-aggregate": {
previousEvents: makeEvents(1, similarButDifferentContainerEvent),
newEvent: similarEvent,
expectedEvent: setCount(similarEvent, 1),
intervalSeconds: 5,
},
"similar-events-whose-interval-is-greater-than-aggregate-interval-do-not-aggregate": {
previousEvents: makeSimilarEvents(defaultAggregateMaxEvents-1, similarEvent, similarEvent.Message),
newEvent: similarEvent,
expectedEvent: setCount(similarEvent, 1),
intervalSeconds: defaultAggregateIntervalInSeconds,
},
}
for testScenario, testInput := range scenario {
eventInterval := time.Duration(testInput.intervalSeconds) * time.Second
clock := clock.IntervalClock{Time: time.Now(), Duration: eventInterval}
correlator := NewEventCorrelator(&clock)
for i := range testInput.previousEvents {
event := testInput.previousEvents[i]
now := metav1.NewTime(clock.Now())
event.FirstTimestamp = now
event.LastTimestamp = now
result, err := correlator.EventCorrelate(&event)
if err != nil {
t.Errorf("scenario %v: unexpected error playing back prevEvents %v", testScenario, err)
}
// if we are skipping the event, we can avoid updating state
if !result.Skip {
correlator.UpdateState(result.Event)
}
}
// update the input to current clock value
now := metav1.NewTime(clock.Now())
testInput.newEvent.FirstTimestamp = now
testInput.newEvent.LastTimestamp = now
result, err := correlator.EventCorrelate(&testInput.newEvent)
if err != nil {
t.Errorf("scenario %v: unexpected error correlating input event %v", testScenario, err)
}
// verify we did not get an unexpected skip from the filter function
if result.Skip != testInput.expectedSkip {
t.Errorf("scenario %v: expected skip %v, but got %v", testScenario, testInput.expectedSkip, result.Skip)
continue
}
// we wanted to actually skip, so no event is needed to validate
if testInput.expectedSkip {
continue
}
// validate event
_, err = validateEvent(testScenario, result.Event, &testInput.expectedEvent, t)
if err != nil {
t.Errorf("scenario %v: unexpected error validating result %v", testScenario, err)
}
}
}

View File

@ -1,58 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package record
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
// when created manually rather than via NewFakeRecorder; in that case, all
// events may be thrown away.
type FakeRecorder struct {
Events chan string
}
func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
if f.Events != nil {
f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
}
}
func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
if f.Events != nil {
f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
}
}
func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
}
func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
f.Eventf(object, eventtype, reason, messageFmt, args...)
}
// NewFakeRecorder creates a new fake event recorder with an event channel
// buffered to the given size.
func NewFakeRecorder(bufferSize int) *FakeRecorder {
return &FakeRecorder{
Events: make(chan string, bufferSize),
}
}
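
// exampleFakeRecorder is an illustrative sketch, not part of the original
// file: tests read formatted event strings off the buffered channel. The
// object argument is ignored by FakeRecorder, so nil is fine here.
func exampleFakeRecorder() string {
	r := NewFakeRecorder(10)
	r.Eventf(nil, "Normal", "Created", "created container %s", "app")
	return <-r.Events // "Normal Created created container app"
}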

View File

@ -1,72 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reference
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type TestRuntimeObj struct {
metav1.TypeMeta
metav1.ObjectMeta
}
func (o *TestRuntimeObj) DeepCopyObject() runtime.Object {
panic("die")
}
func TestGetReferenceRefVersion(t *testing.T) {
tests := []struct {
name string
input *TestRuntimeObj
expectedRefVersion string
}{
{
name: "api from selflink",
input: &TestRuntimeObj{
ObjectMeta: metav1.ObjectMeta{SelfLink: "/api/v1/namespaces"},
},
expectedRefVersion: "v1",
},
{
name: "foo.group/v3 from selflink",
input: &TestRuntimeObj{
ObjectMeta: metav1.ObjectMeta{SelfLink: "/apis/foo.group/v3/namespaces"},
},
expectedRefVersion: "foo.group/v3",
},
}
scheme := runtime.NewScheme()
scheme.AddKnownTypes(schema.GroupVersion{Group: "this", Version: "is ignored"}, &TestRuntimeObj{})
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ref, err := GetReference(scheme, test.input)
if err != nil {
t.Fatal(err)
}
if test.expectedRefVersion != ref.APIVersion {
t.Errorf("expected %q, got %q", test.expectedRefVersion, ref.APIVersion)
}
})
}
}

View File

@ -1,20 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package remotecommand adds support for executing commands in containers,
// with support for separate stdin, stdout, and stderr streams, as well as
// TTY.
package remotecommand // import "k8s.io/client-go/tools/remotecommand"

View File

@ -1,55 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"io/ioutil"
"k8s.io/apimachinery/pkg/util/runtime"
)
// errorStreamDecoder interprets the data on the error channel and creates a go error object from it.
type errorStreamDecoder interface {
decode(message []byte) error
}
// watchErrorStream watches the errorStream for remote command error data,
// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote
// command exited successfully) to the returned error channel, and closes it.
// This function returns immediately.
func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
errorChan := make(chan error)
go func() {
defer runtime.HandleCrash()
message, err := ioutil.ReadAll(errorStream)
switch {
case err != nil && err != io.EOF:
errorChan <- fmt.Errorf("error reading from error stream: %s", err)
case len(message) > 0:
errorChan <- d.decode(message)
default:
errorChan <- nil
}
close(errorChan)
}()
return errorChan
}
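
// exampleSuccessStream is an illustrative sketch, not part of the original
// file: an error stream that closes with no data means the remote command
// succeeded, so the returned channel yields nil. errorDecoderV2 is defined in
// v2.go of this package.
func exampleSuccessStream() error {
	empty := io.MultiReader() // zero readers: returns io.EOF immediately
	return <-watchErrorStream(empty, &errorDecoderV2{})
}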

View File

@ -1,41 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"io"
)
// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
//
// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
// That results in an oversized call to spdystream.Stream#Write [3],
// which results in a single oversized data frame[4] that is too large.
//
// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
// [2] https://golang.org/pkg/io/#Copy
// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
type readerWrapper struct {
reader io.Reader
}
func (r readerWrapper) Read(p []byte) (int, error) {
return r.reader.Read(p)
}
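
// exampleWrappedCopy is an illustrative sketch, not part of the original
// file: even when stdin implements io.WriterTo (bytes.Buffer does), the
// wrapper exposes only Read, so io.Copy falls back to its chunked copy loop
// instead of issuing one oversized write to the stream.
func exampleWrappedCopy(dst io.Writer, stdin io.Reader) (int64, error) {
	return io.Copy(dst, readerWrapper{stdin})
}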

View File

@ -1,142 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"net/http"
"net/url"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/remotecommand"
restclient "k8s.io/client-go/rest"
spdy "k8s.io/client-go/transport/spdy"
)
// StreamOptions holds information pertaining to the current streaming session:
// input/output streams, if the client is requesting a TTY, and a terminal size queue to
// support terminal resizing.
type StreamOptions struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
Tty bool
TerminalSizeQueue TerminalSizeQueue
}
// Executor is an interface for transporting shell-style streams.
type Executor interface {
// Stream initiates the transport of the standard shell streams. It will transport any
// non-nil stream to a remote system, and return an error if a problem occurs. If tty
// is set, the stderr stream is not used (raw TTY manages stdout and stderr over the
// stdout stream).
Stream(options StreamOptions) error
}
type streamCreator interface {
CreateStream(headers http.Header) (httpstream.Stream, error)
}
type streamProtocolHandler interface {
stream(conn streamCreator) error
}
// streamExecutor handles transporting standard shell streams over an httpstream connection.
type streamExecutor struct {
upgrader spdy.Upgrader
transport http.RoundTripper
method string
url *url.URL
protocols []string
}
// NewSPDYExecutor connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams.
func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
if err != nil {
return nil, err
}
return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
}
// NewSPDYExecutorForTransports connects to the provided server using the given transport,
// and upgrades the response using the given upgrader to multiplexed bidirectional streams.
func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
return NewSPDYExecutorForProtocols(
transport, upgrader, method, url,
remotecommand.StreamProtocolV4Name,
remotecommand.StreamProtocolV3Name,
remotecommand.StreamProtocolV2Name,
remotecommand.StreamProtocolV1Name,
)
}
// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
// multiplexed bidirectional streams using only the provided protocols. Exposed for testing; most
// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
return &streamExecutor{
upgrader: upgrader,
transport: transport,
method: method,
url: url,
protocols: protocols,
}, nil
}
// Stream opens a protocol streamer to the server and streams until a client closes
// the connection or the server disconnects.
func (e *streamExecutor) Stream(options StreamOptions) error {
req, err := http.NewRequest(e.method, e.url.String(), nil)
if err != nil {
return fmt.Errorf("error creating request: %v", err)
}
conn, protocol, err := spdy.Negotiate(
e.upgrader,
&http.Client{Transport: e.transport},
req,
e.protocols...,
)
if err != nil {
return err
}
defer conn.Close()
var streamer streamProtocolHandler
switch protocol {
case remotecommand.StreamProtocolV4Name:
streamer = newStreamProtocolV4(options)
case remotecommand.StreamProtocolV3Name:
streamer = newStreamProtocolV3(options)
case remotecommand.StreamProtocolV2Name:
streamer = newStreamProtocolV2(options)
case "":
klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
fallthrough
case remotecommand.StreamProtocolV1Name:
streamer = newStreamProtocolV1(options)
}
return streamer.stream(conn)
}
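
// exampleExec is an illustrative sketch, not part of the original file: a
// typical caller builds the exec/attach URL elsewhere (for example with the
// rest client's request builder); config, execURL, and the output writers are
// hypothetical placeholders supplied by that caller.
func exampleExec(config *restclient.Config, execURL *url.URL, stdout, stderr io.Writer) error {
	exec, err := NewSPDYExecutor(config, "POST", execURL)
	if err != nil {
		return err
	}
	return exec.Stream(StreamOptions{
		Stdout: stdout,
		Stderr: stderr, // unused by the server when Tty is set
	})
}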

View File

@ -1,33 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
// TerminalSize and TerminalSizeQueue were part of k8s.io/kubernetes/pkg/util/term
// and were moved here to decouple the client from other term dependencies.
// TerminalSize represents the width and height of a terminal.
type TerminalSize struct {
Width uint16
Height uint16
}
// TerminalSizeQueue is capable of returning terminal resize events as they occur.
type TerminalSizeQueue interface {
// Next returns the new terminal size after the terminal has been resized. It returns nil when
// monitoring has been stopped.
Next() *TerminalSize
}
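
// channelSizeQueue is an illustrative sketch, not part of the original file:
// a minimal TerminalSizeQueue backed by a channel. Closing the channel stops
// monitoring, which Next signals by returning nil per the interface contract.
type channelSizeQueue struct {
	sizes chan TerminalSize
}

func (q *channelSizeQueue) Next() *TerminalSize {
	size, ok := <-q.sizes
	if !ok {
		return nil // channel closed: resize monitoring stopped
	}
	return &size
}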

View File

@ -1,160 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/klog"
)
// streamProtocolV1 implements the first version of the streaming exec & attach
// protocol. This version has some bugs, such as not being able to detect when
// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
// http://issues.k8s.io/13395 for more details.
type streamProtocolV1 struct {
StreamOptions
errorStream httpstream.Stream
remoteStdin httpstream.Stream
remoteStdout httpstream.Stream
remoteStderr httpstream.Stream
}
var _ streamProtocolHandler = &streamProtocolV1{}
func newStreamProtocolV1(options StreamOptions) streamProtocolHandler {
return &streamProtocolV1{
StreamOptions: options,
}
}
func (p *streamProtocolV1) stream(conn streamCreator) error {
doneChan := make(chan struct{}, 2)
errorChan := make(chan error)
cp := func(s string, dst io.Writer, src io.Reader) {
klog.V(6).Infof("Copying %s", s)
defer klog.V(6).Infof("Done copying %s", s)
if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
klog.Errorf("Error copying %s: %v", s, err)
}
if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
doneChan <- struct{}{}
}
}
// set up all the streams first
var err error
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeError)
p.errorStream, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.errorStream.Reset()
// Create all the streams first, then start the copy goroutines. The server doesn't start its copy
// goroutines until it's received all of the streams. If the client creates the stdin stream and
// immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
// spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
// getting processed because the server hasn't started its copying, and it won't do that until it
// gets all the streams. By creating all the streams first, we ensure that the server is ready to
// process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
if p.Stdin != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdin)
p.remoteStdin, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.remoteStdin.Reset()
}
if p.Stdout != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdout)
p.remoteStdout, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.remoteStdout.Reset()
}
if p.Stderr != nil && !p.Tty {
headers.Set(v1.StreamType, v1.StreamTypeStderr)
p.remoteStderr, err = conn.CreateStream(headers)
if err != nil {
return err
}
defer p.remoteStderr.Reset()
}
// now that all the streams have been created, proceed with reading & copying
// always read from errorStream
go func() {
message, err := ioutil.ReadAll(p.errorStream)
if err != nil && err != io.EOF {
errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
return
}
if len(message) > 0 {
errorChan <- fmt.Errorf("Error executing remote command: %s", message)
return
}
}()
if p.Stdin != nil {
// TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
// because stdin is not closed until the process exits. If we try to call
// stdin.Close(), it returns no error but doesn't unblock the copy. It will
// exit when the process exits, instead.
go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
}
waitCount := 0
completedStreams := 0
if p.Stdout != nil {
waitCount++
go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
}
if p.Stderr != nil && !p.Tty {
waitCount++
go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
}
Loop:
for {
select {
case <-doneChan:
completedStreams++
if completedStreams == waitCount {
break Loop
}
case err := <-errorChan:
return err
}
}
return nil
}

View File

@ -1,195 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
)
// streamProtocolV2 implements version 2 of the streaming protocol for attach
// and exec. The original streaming protocol was unversioned. As a result, this
// version is referred to as version 2, even though it is the first actual
// numbered version.
type streamProtocolV2 struct {
StreamOptions
errorStream io.Reader
remoteStdin io.ReadWriteCloser
remoteStdout io.Reader
remoteStderr io.Reader
}
var _ streamProtocolHandler = &streamProtocolV2{}
func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
return &streamProtocolV2{
StreamOptions: options,
}
}
func (p *streamProtocolV2) createStreams(conn streamCreator) error {
var err error
headers := http.Header{}
// set up error stream
headers.Set(v1.StreamType, v1.StreamTypeError)
p.errorStream, err = conn.CreateStream(headers)
if err != nil {
return err
}
// set up stdin stream
if p.Stdin != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdin)
p.remoteStdin, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
// set up stdout stream
if p.Stdout != nil {
headers.Set(v1.StreamType, v1.StreamTypeStdout)
p.remoteStdout, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
// set up stderr stream
if p.Stderr != nil && !p.Tty {
headers.Set(v1.StreamType, v1.StreamTypeStderr)
p.remoteStderr, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
return nil
}
func (p *streamProtocolV2) copyStdin() {
if p.Stdin != nil {
var once sync.Once
// copy from client's stdin to container's stdin
go func() {
defer runtime.HandleCrash()
// if p.stdin is noninteractive, e.g. `echo abc | kubectl exec -i <pod> -- cat`, make sure
// we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise
// the executed command will remain running.
defer once.Do(func() { p.remoteStdin.Close() })
if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
runtime.HandleError(err)
}
}()
// read from remoteStdin until the stream is closed. this is essential to
// be able to exit interactive sessions cleanly and not leak goroutines or
// hang the client's terminal.
//
// TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
// required by engine-api.
//
// go-dockerclient's current hijack implementation
// (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
// waits for all three streams (stdin/stdout/stderr) to finish copying
// before returning. When hijack finishes copying stdout/stderr, it calls
// Close() on its side of remoteStdin, which allows this copy to complete.
// When that happens, we must Close() on our side of remoteStdin, to
// allow the copy in hijack to complete, and hijack to return.
go func() {
defer runtime.HandleCrash()
defer once.Do(func() { p.remoteStdin.Close() })
// this "copy" doesn't actually read anything - it's just here to wait for
// the server to close remoteStdin.
if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil {
runtime.HandleError(err)
}
}()
}
}
func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
if p.Stdout == nil {
return
}
wg.Add(1)
go func() {
defer runtime.HandleCrash()
defer wg.Done()
if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
runtime.HandleError(err)
}
}()
}
func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
if p.Stderr == nil || p.Tty {
return
}
wg.Add(1)
go func() {
defer runtime.HandleCrash()
defer wg.Done()
if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
runtime.HandleError(err)
}
}()
}
func (p *streamProtocolV2) stream(conn streamCreator) error {
if err := p.createStreams(conn); err != nil {
return err
}
// now that all the streams have been created, proceed with reading & copying
errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})
p.copyStdin()
var wg sync.WaitGroup
p.copyStdout(&wg)
p.copyStderr(&wg)
// we're waiting for stdout/stderr to finish copying
wg.Wait()
// waits for errorStream to finish reading with an error or nil
return <-errorChan
}
// errorDecoderV2 interprets the error channel data as plain text.
type errorDecoderV2 struct{}
func (d *errorDecoderV2) decode(message []byte) error {
return fmt.Errorf("error executing remote command: %s", message)
}
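// exampleDecodeV2 is a hypothetical illustration of the decoder above: the
// v2 error stream carries plain text, which decode wraps verbatim.
func exampleDecodeV2() error {
	d := &errorDecoderV2{}
	// yields: error executing remote command: some error
	return d.decode([]byte("some error"))
}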

View File

@ -1,228 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"errors"
"io"
"net/http"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/wait"
)
type fakeReader struct {
err error
}
func (r *fakeReader) Read([]byte) (int, error) { return 0, r.err }
type fakeWriter struct{}
func (*fakeWriter) Write([]byte) (int, error) { return 0, nil }
type fakeStreamCreator struct {
created map[string]bool
errors map[string]error
}
var _ streamCreator = &fakeStreamCreator{}
func (f *fakeStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) {
streamType := headers.Get(v1.StreamType)
f.created[streamType] = true
return nil, f.errors[streamType]
}
func TestV2CreateStreams(t *testing.T) {
tests := []struct {
name string
stdin bool
stdinError error
stdout bool
stdoutError error
stderr bool
stderrError error
errorError error
tty bool
expectError bool
}{
{
name: "stdin error",
stdin: true,
stdinError: errors.New("stdin error"),
expectError: true,
},
{
name: "stdout error",
stdout: true,
stdoutError: errors.New("stdout error"),
expectError: true,
},
{
name: "stderr error",
stderr: true,
stderrError: errors.New("stderr error"),
expectError: true,
},
{
name: "error stream error",
stdin: true,
stdout: true,
stderr: true,
errorError: errors.New("error stream error"),
expectError: true,
},
{
name: "no errors",
stdin: true,
stdout: true,
stderr: true,
expectError: false,
},
{
name: "no errors, stderr & tty set, don't expect stderr",
stdin: true,
stdout: true,
stderr: true,
tty: true,
expectError: false,
},
}
for _, test := range tests {
conn := &fakeStreamCreator{
created: make(map[string]bool),
errors: map[string]error{
v1.StreamTypeStdin: test.stdinError,
v1.StreamTypeStdout: test.stdoutError,
v1.StreamTypeStderr: test.stderrError,
v1.StreamTypeError: test.errorError,
},
}
opts := StreamOptions{Tty: test.tty}
if test.stdin {
opts.Stdin = &fakeReader{}
}
if test.stdout {
opts.Stdout = &fakeWriter{}
}
if test.stderr {
opts.Stderr = &fakeWriter{}
}
h := newStreamProtocolV2(opts).(*streamProtocolV2)
err := h.createStreams(conn)
if test.expectError {
if err == nil {
t.Errorf("%s: expected error", test.name)
continue
}
if e, a := test.stdinError, err; test.stdinError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
if e, a := test.stdoutError, err; test.stdoutError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
if e, a := test.stderrError, err; test.stderrError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
if e, a := test.errorError, err; test.errorError != nil && e != a {
t.Errorf("%s: expected %v, got %v", test.name, e, a)
}
continue
}
if !test.expectError && err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
if test.stdin && !conn.created[v1.StreamTypeStdin] {
t.Errorf("%s: expected stdin stream", test.name)
}
if test.stdout && !conn.created[v1.StreamTypeStdout] {
t.Errorf("%s: expected stdout stream", test.name)
}
if test.stderr {
if test.tty && conn.created[v1.StreamTypeStderr] {
t.Errorf("%s: unexpected stderr stream because tty is set", test.name)
} else if !test.tty && !conn.created[v1.StreamTypeStderr] {
t.Errorf("%s: expected stderr stream", test.name)
}
}
if !conn.created[v1.StreamTypeError] {
t.Errorf("%s: expected error stream", test.name)
}
}
}
func TestV2ErrorStreamReading(t *testing.T) {
tests := []struct {
name string
stream io.Reader
expectedError error
}{
{
name: "error reading from stream",
stream: &fakeReader{errors.New("foo")},
expectedError: errors.New("error reading from error stream: foo"),
},
{
name: "stream returns an error",
stream: strings.NewReader("some error"),
expectedError: errors.New("error executing remote command: some error"),
},
}
for _, test := range tests {
h := newStreamProtocolV2(StreamOptions{}).(*streamProtocolV2)
h.errorStream = test.stream
ch := watchErrorStream(h.errorStream, &errorDecoderV2{})
if ch == nil {
t.Fatalf("%s: unexpected nil channel", test.name)
}
var err error
select {
case err = <-ch:
case <-time.After(wait.ForeverTestTimeout):
t.Fatalf("%s: timed out", test.name)
}
if test.expectedError != nil {
if err == nil {
t.Errorf("%s: expected an error", test.name)
} else if e, a := test.expectedError, err; e.Error() != a.Error() {
t.Errorf("%s: expected %q, got %q", test.name, e, a)
}
continue
}
if test.expectedError == nil && err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
}
}

View File

@ -1,111 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"encoding/json"
"io"
"net/http"
"sync"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
)
// streamProtocolV3 implements version 3 of the streaming protocol for attach
// and exec. This version adds support for resizing the container's terminal.
type streamProtocolV3 struct {
*streamProtocolV2
resizeStream io.Writer
}
var _ streamProtocolHandler = &streamProtocolV3{}
func newStreamProtocolV3(options StreamOptions) streamProtocolHandler {
return &streamProtocolV3{
streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2),
}
}
func (p *streamProtocolV3) createStreams(conn streamCreator) error {
// set up the streams from v2
if err := p.streamProtocolV2.createStreams(conn); err != nil {
return err
}
// set up resize stream
if p.Tty {
headers := http.Header{}
headers.Set(v1.StreamType, v1.StreamTypeResize)
var err error
p.resizeStream, err = conn.CreateStream(headers)
if err != nil {
return err
}
}
return nil
}
func (p *streamProtocolV3) handleResizes() {
if p.resizeStream == nil || p.TerminalSizeQueue == nil {
return
}
go func() {
defer runtime.HandleCrash()
encoder := json.NewEncoder(p.resizeStream)
for {
size := p.TerminalSizeQueue.Next()
if size == nil {
return
}
if err := encoder.Encode(&size); err != nil {
runtime.HandleError(err)
}
}
}()
}
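// exampleEncodeResize is a hypothetical illustration of the wire format used
// by handleResizes: each terminal resize is one JSON document on the resize
// stream, e.g. {"Width":120,"Height":40}. The anonymous struct mirrors
// remotecommand.TerminalSize.
func exampleEncodeResize(w io.Writer) error {
	size := struct {
		Width  uint16
		Height uint16
	}{Width: 120, Height: 40}
	return json.NewEncoder(w).Encode(&size)
}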
func (p *streamProtocolV3) stream(conn streamCreator) error {
if err := p.createStreams(conn); err != nil {
return err
}
// now that all the streams have been created, proceed with reading & copying
errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{})
p.handleResizes()
p.copyStdin()
var wg sync.WaitGroup
p.copyStdout(&wg)
p.copyStderr(&wg)
// we're waiting for stdout/stderr to finish copying
wg.Wait()
// waits for errorStream to finish reading with an error or nil
return <-errorChan
}
type errorDecoderV3 struct {
errorDecoderV2
}

View File

@ -1,119 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/client-go/util/exec"
)
// streamProtocolV4 implements version 4 of the streaming protocol for attach
// and exec. This version adds support for exit codes on the error stream through
// the use of metav1.Status instead of plain text messages.
type streamProtocolV4 struct {
*streamProtocolV3
}
var _ streamProtocolHandler = &streamProtocolV4{}
func newStreamProtocolV4(options StreamOptions) streamProtocolHandler {
return &streamProtocolV4{
streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3),
}
}
func (p *streamProtocolV4) createStreams(conn streamCreator) error {
return p.streamProtocolV3.createStreams(conn)
}
func (p *streamProtocolV4) handleResizes() {
p.streamProtocolV3.handleResizes()
}
func (p *streamProtocolV4) stream(conn streamCreator) error {
if err := p.createStreams(conn); err != nil {
return err
}
// now that all the streams have been created, proceed with reading & copying
errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{})
p.handleResizes()
p.copyStdin()
var wg sync.WaitGroup
p.copyStdout(&wg)
p.copyStderr(&wg)
// we're waiting for stdout/stderr to finish copying
wg.Wait()
// waits for errorStream to finish reading with an error or nil
return <-errorChan
}
// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel
// and creates an exec.ExitError from it.
type errorDecoderV4 struct{}
func (d *errorDecoderV4) decode(message []byte) error {
status := metav1.Status{}
err := json.Unmarshal(message, &status)
if err != nil {
return fmt.Errorf("error stream protocol error: %v in %q", err, string(message))
}
switch status.Status {
case metav1.StatusSuccess:
return nil
case metav1.StatusFailure:
if status.Reason == remotecommand.NonZeroExitCodeReason {
if status.Details == nil {
return errors.New("error stream protocol error: details must be set")
}
for i := range status.Details.Causes {
c := &status.Details.Causes[i]
if c.Type != remotecommand.ExitCodeCauseType {
continue
}
rc, err := strconv.ParseUint(c.Message, 10, 8)
if err != nil {
return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message)
}
return exec.CodeExitError{
Err: fmt.Errorf("command terminated with exit code %d", rc),
Code: int(rc),
}
}
return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType)
}
default:
return errors.New("error stream protocol error: unknown error")
}
return errors.New(status.Message)
}
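// exampleDecodeV4 is a hypothetical illustration of the decoder above: a
// metav1.Status payload with reason NonZeroExitCode decodes into an
// exec.CodeExitError carrying the numeric exit code.
func exampleDecodeV4() error {
	d := &errorDecoderV4{}
	msg := []byte(`{"status":"Failure","message":"command failed",` +
		`"reason":"NonZeroExitCode",` +
		`"details":{"causes":[{"reason":"ExitCode","message":"42"}]}}`)
	// yields exec.CodeExitError{Code: 42} with message
	// "command terminated with exit code 42"
	return d.decode(msg)
}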

View File

@ -1,71 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"testing"
)
func TestV4ErrorDecoder(t *testing.T) {
dec := errorDecoderV4{}
type Test struct {
message string
err string
}
for _, test := range []Test{
{
message: "{}",
err: "error stream protocol error: unknown error",
},
{
message: "{",
err: "error stream protocol error: unexpected end of JSON input in \"{\"",
},
{
message: `{"status": "Success" }`,
err: "",
},
{
message: `{"status": "Failure", "message": "foobar" }`,
err: "foobar",
},
{
message: `{"status": "Failure", "message": "foobar", "reason": "NonZeroExitCode", "details": {"causes": [{"reason": "foo"}] } }`,
err: "error stream protocol error: no ExitCode cause given",
},
{
message: `{"status": "Failure", "message": "foobar", "reason": "NonZeroExitCode", "details": {"causes": [{"reason": "ExitCode"}] } }`,
err: "error stream protocol error: invalid exit code value \"\"",
},
{
message: `{"status": "Failure", "message": "foobar", "reason": "NonZeroExitCode", "details": {"causes": [{"reason": "ExitCode", "message": "42"}] } }`,
err: "command terminated with exit code 42",
},
} {
err := dec.decode([]byte(test.message))
want := test.err
if want == "" {
want = "<nil>"
}
if got := fmt.Sprintf("%v", err); got != want {
t.Errorf("wrong error for message %q: want=%q, got=%q", test.message, want, got)
}
}
}

View File

@ -1,114 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"sync"
"sync/atomic"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
func newTicketer() *ticketer {
return &ticketer{
cond: sync.NewCond(&sync.Mutex{}),
}
}
type ticketer struct {
counter uint64
cond *sync.Cond
current uint64
}
func (t *ticketer) GetTicket() uint64 {
// -1 to start from 0
return atomic.AddUint64(&t.counter, 1) - 1
}
func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
t.cond.L.Lock()
defer t.cond.L.Unlock()
for ticket != t.current {
t.cond.Wait()
}
f()
t.current++
t.cond.Broadcast()
}
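// exampleTicketOrdering is a hypothetical illustration of the ticketer:
// tickets come from an atomic counter, and WaitForTicket runs callbacks in
// strict ticket order even when the goroutines holding them start out of
// order. Here the goroutines launch in reverse, yet the result is 0, 1, 2.
func exampleTicketOrdering() []uint64 {
	t := newTicketer()
	tickets := []uint64{t.GetTicket(), t.GetTicket(), t.GetTicket()} // 0, 1, 2
	out := make(chan uint64, len(tickets))
	var wg sync.WaitGroup
	for i := len(tickets) - 1; i >= 0; i-- {
		wg.Add(1)
		go func(n uint64) {
			defer wg.Done()
			t.WaitForTicket(n, func() { out <- n })
		}(tickets[i])
	}
	wg.Wait()
	close(out)
	order := make([]uint64, 0, len(tickets))
	for n := range out {
		order = append(order, n)
	}
	return order // always [0 1 2]
}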
// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
// so you can use it anywhere where you'd have used a regular Watcher returned from Watch method.
func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface) {
ch := make(chan watch.Event)
w := watch.NewProxyWatcher(ch)
t := newTicketer()
indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
select {
case ch <- watch.Event{
Type: watch.Added,
Object: obj.(runtime.Object),
}:
case <-w.StopChan():
}
})
},
UpdateFunc: func(old, new interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
select {
case ch <- watch.Event{
Type: watch.Modified,
Object: new.(runtime.Object),
}:
case <-w.StopChan():
}
})
},
DeleteFunc: func(obj interface{}) {
go t.WaitForTicket(t.GetTicket(), func() {
staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
if stale {
// We have no means of passing this additional information down via the
// watch API's watch.Event, but the caller can filter such objects by
// checking whether metadata.deletionTimestamp is set.
obj = staleObj.Obj
}
select {
case ch <- watch.Event{
Type: watch.Deleted,
Object: obj.(runtime.Object),
}:
case <-w.StopChan():
}
})
},
}, cache.Indexers{})
go func() {
informer.Run(w.StopChan())
}()
return indexer, informer, w
}
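// exampleFirstEvent is a hypothetical illustration of consuming the wrapper:
// the returned watch.Interface behaves like any other watcher, and Stop shuts
// the underlying informer down via its stop channel.
func exampleFirstEvent(lw cache.ListerWatcher, objType runtime.Object) (watch.Event, bool) {
	_, _, w := NewIndexerInformerWatcher(lw, objType)
	defer w.Stop()
	event, ok := <-w.ResultChan()
	return event, ok
}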

View File

@ -1,236 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"math/rand"
"reflect"
"sort"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/watch"
fakeclientset "k8s.io/client-go/kubernetes/fake"
testcore "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
)
type byEventTypeAndName []watch.Event
func (a byEventTypeAndName) Len() int { return len(a) }
func (a byEventTypeAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byEventTypeAndName) Less(i, j int) bool {
if a[i].Type < a[j].Type {
return true
}
if a[i].Type > a[j].Type {
return false
}
return a[i].Object.(*corev1.Secret).Name < a[j].Object.(*corev1.Secret).Name
}
func TestTicketer(t *testing.T) {
tg := newTicketer()
const numTickets = 100 // current golang limit for race detector is 8192 simultaneously alive goroutines
var tickets []uint64
for i := 0; i < numTickets; i++ {
ticket := tg.GetTicket()
tickets = append(tickets, ticket)
exp, got := uint64(i), ticket
if got != exp {
t.Fatalf("expected ticket %d, got %d", exp, got)
}
}
// shuffle tickets
rand.Shuffle(len(tickets), func(i, j int) {
tickets[i], tickets[j] = tickets[j], tickets[i]
})
res := make(chan uint64, len(tickets))
for _, ticket := range tickets {
go func(ticket uint64) {
time.Sleep(time.Duration(rand.Intn(50)) * time.Millisecond)
tg.WaitForTicket(ticket, func() {
res <- ticket
})
}(ticket)
}
for i := 0; i < numTickets; i++ {
exp, got := uint64(i), <-res
if got != exp {
t.Fatalf("expected ticket %d, got %d", exp, got)
}
}
}
func TestNewInformerWatcher(t *testing.T) {
// Make sure there are no two events of the same type for a secret with the same name, or the test might be flaky.
tt := []struct {
name string
objects []runtime.Object
events []watch.Event
}{
{
name: "basic test",
objects: []runtime.Object{
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
},
StringData: map[string]string{
"foo-1": "initial",
},
},
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
},
StringData: map[string]string{
"foo-2": "initial",
},
},
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-3",
},
StringData: map[string]string{
"foo-3": "initial",
},
},
},
events: []watch.Event{
{
Type: watch.Added,
Object: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-4",
},
StringData: map[string]string{
"foo-4": "initial",
},
},
},
{
Type: watch.Modified,
Object: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
},
StringData: map[string]string{
"foo-2": "new",
},
},
},
{
Type: watch.Deleted,
Object: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-3",
},
},
},
},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
var expected []watch.Event
for _, o := range tc.objects {
expected = append(expected, watch.Event{
Type: watch.Added,
Object: o.DeepCopyObject(),
})
}
for _, e := range tc.events {
expected = append(expected, *e.DeepCopy())
}
fake := fakeclientset.NewSimpleClientset(tc.objects...)
fakeWatch := watch.NewFakeWithChanSize(len(tc.events), false)
fake.PrependWatchReactor("secrets", testcore.DefaultWatchReactor(fakeWatch, nil))
for _, e := range tc.events {
fakeWatch.Action(e.Type, e.Object)
}
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return fake.Core().Secrets("").List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fake.Core().Secrets("").Watch(options)
},
}
_, _, w := NewIndexerInformerWatcher(lw, &corev1.Secret{})
var result []watch.Event
loop:
for {
var event watch.Event
var ok bool
select {
case event, ok = <-w.ResultChan():
if !ok {
t.Errorf("Failed to read event: channel is already closed!")
return
}
result = append(result, *event.DeepCopy())
case <-time.After(time.Second * 1):
// All the events are buffered -> this means we are done
// The one-second timeout also makes sure we would detect incorrect watcher behaviour after the last event
break loop
}
}
// Informers don't guarantee event order so we need to sort these arrays to compare them
sort.Sort(byEventTypeAndName(expected))
sort.Sort(byEventTypeAndName(result))
if !reflect.DeepEqual(expected, result) {
t.Error(spew.Errorf("\nexpected: %#v,\ngot: %#v,\ndiff: %s", expected, result, diff.ObjectReflectDiff(expected, result)))
return
}
// Fill in some data to test watch closing while there are some events to be read
for _, e := range tc.events {
fakeWatch.Action(e.Type, e.Object)
}
// Stop before reading all the data to make sure the informer can deal with closed channel
w.Stop()
// Wait a bit to see if the informer won't panic
// TODO: Try to figure out a more reliable mechanism than time.Sleep (https://github.com/kubernetes/kubernetes/pull/50102/files#r184716591)
time.Sleep(1 * time.Second)
})
}
}

View File

@ -1,225 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"context"
"errors"
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition failed or detected an error state.
type PreconditionFunc func(store cache.Store) (bool, error)
// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition cannot be checked and should terminate. In general, it is better to define
// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
// from false to true).
type ConditionFunc func(event watch.Event) (bool, error)
// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")
// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch
// event encountered. The first condition that returns an error terminates the watch (and the event is also returned).
// If no event has been received, the returned event will be nil.
// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
// Waits until context deadline or until context is canceled.
//
// Warning: Unless you have a very specific use case (probably a special Watcher), don't use this function!
// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' errors.
// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
// Warning: which solve such issues.
// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
ch := watcher.ResultChan()
defer watcher.Stop()
var lastEvent *watch.Event
for _, condition := range conditions {
// check the next condition against the previous event and short circuit waiting for the next watch
if lastEvent != nil {
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
continue
}
}
ConditionSucceeded:
for {
select {
case event, ok := <-ch:
if !ok {
return lastEvent, ErrWatchClosed
}
lastEvent = &event
done, err := condition(event)
if err != nil {
return lastEvent, err
}
if done {
break ConditionSucceeded
}
case <-ctx.Done():
return lastEvent, wait.ErrWaitTimeout
}
}
}
return lastEvent, nil
}
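// exampleUntilAdded is a hypothetical illustration of a typical call: wait,
// with a deadline, for the first Added event on an existing watcher.
func exampleUntilAdded(w watch.Interface) (*watch.Event, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return UntilWithoutRetry(ctx, w,
		func(e watch.Event) (bool, error) { return e.Type == watch.Added, nil },
	)
}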
// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced,
// and watches the output until each provided condition succeeds, in a way that is identical
// to function UntilWithoutRetry. (See above.)
// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
// It is the only function that can recover from 'Resource version too old', Until and UntilWithoutRetry will
// just fail in that case. On the other hand it can't provide you with guarantees as strong as using simple
// Watch method with Until. It can skip some intermediate events if the watch function fails, but it will
// re-list to recover, and after recovery you always get an event if there has been a change.
// Also, with the current implementation based on DeltaFIFO, the order of events is guaranteed only for a
// particular object, not across objects, even if they belong to the same resource.
// The most frequent usage is a command that needs to watch the "state of the world" and shouldn't fail, like
// waiting for an object to reach a state, "small" controllers, ...
func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
indexer, informer, watcher := NewIndexerInformerWatcher(lw, objType)
// Proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches and
// to let UntilWithoutRetry stop it
defer watcher.Stop()
if precondition != nil {
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
}
done, err := precondition(indexer)
if err != nil {
return nil, err
}
if done {
return nil, nil
}
}
return UntilWithoutRetry(ctx, watcher, conditions...)
}
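// exampleWaitForObject is a hypothetical illustration of UntilWithSync: the
// precondition short-circuits if the object is already in the synced store
// (the key here assumes a cluster-wide, namespace-less object), otherwise
// the condition waits for it to be added.
func exampleWaitForObject(lw cache.ListerWatcher, objType runtime.Object, name string) (*watch.Event, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	precondition := func(store cache.Store) (bool, error) {
		_, exists, err := store.GetByKey(name)
		return exists, err
	}
	return UntilWithSync(ctx, lw, objType, precondition,
		func(e watch.Event) (bool, error) { return e.Type == watch.Added, nil },
	)
}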
// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if timeout < 0 {
// This should be handled in validation
klog.Errorf("Timeout for context shall not be negative!")
timeout = 0
}
if timeout == 0 {
return context.WithCancel(parent)
}
return context.WithTimeout(parent, timeout)
}
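// exampleNoDeadline is a hypothetical illustration of the helper above: a
// zero timeout produces a context that is cancelable but has no deadline.
func exampleNoDeadline() bool {
	ctx, cancel := ContextWithOptionalTimeout(context.Background(), 0)
	defer cancel()
	_, hasDeadline := ctx.Deadline()
	return hasDeadline // false
}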
// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
// TODO: remove when no longer used
//
// Deprecated: Use UntilWithSync instead.
func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) {
if len(conditions) == 0 {
return nil, nil
}
list, err := lw.List(metav1.ListOptions{})
if err != nil {
return nil, err
}
initialItems, err := meta.ExtractList(list)
if err != nil {
return nil, err
}
// use the initial items as simulated "adds"
var lastEvent *watch.Event
currIndex := 0
passedConditions := 0
for _, condition := range conditions {
// check the next condition against the previous event and short circuit waiting for the next watch
if lastEvent != nil {
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
passedConditions = passedConditions + 1
continue
}
}
ConditionSucceeded:
for currIndex < len(initialItems) {
lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
currIndex++
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
passedConditions = passedConditions + 1
break ConditionSucceeded
}
}
}
if passedConditions == len(conditions) {
return lastEvent, nil
}
remainingConditions := conditions[passedConditions:]
metaObj, err := meta.ListAccessor(list)
if err != nil {
return nil, err
}
currResourceVersion := metaObj.GetResourceVersion()
watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
if err != nil {
return nil, err
}
ctx, cancel := ContextWithOptionalTimeout(context.Background(), timeout)
defer cancel()
evt, err := UntilWithoutRetry(ctx, watchInterface, remainingConditions...)
if err == ErrWatchClosed {
// present a consistent error interface to callers
err = wait.ErrWaitTimeout
}
return evt, err
}

View File

@ -1,303 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"context"
"errors"
"reflect"
"strings"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
fakeclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
)
type fakePod struct {
name string
}
func (obj *fakePod) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind }
func (obj *fakePod) DeepCopyObject() runtime.Object { panic("DeepCopyObject not supported by fakePod") }
func TestUntil(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
fw.Modify(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Modified, nil },
}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
lastEvent, err := UntilWithoutRetry(ctx, fw, conditions...)
if err != nil {
t.Fatalf("expected nil error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Modified {
t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilMultipleConditions(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
lastEvent, err := UntilWithoutRetry(ctx, fw, conditions...)
if err != nil {
t.Fatalf("expected nil error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Added {
t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilMultipleConditionsFail(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return event.Type == watch.Deleted, nil },
}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
lastEvent, err := UntilWithoutRetry(ctx, fw, conditions...)
if err != wait.ErrWaitTimeout {
t.Fatalf("expected ErrWaitTimeout error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Added {
t.Fatalf("expected ADDED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilTimeout(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
fw.Modify(obj)
}()
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) {
return event.Type == watch.Added, nil
},
func(event watch.Event) (bool, error) {
return event.Type == watch.Modified, nil
},
}
lastEvent, err := UntilWithoutRetry(context.Background(), fw, conditions...)
if err != nil {
t.Fatalf("expected nil error, got %#v", err)
}
if lastEvent == nil {
t.Fatal("expected an event")
}
if lastEvent.Type != watch.Modified {
t.Fatalf("expected MODIFIED event type, got %v", lastEvent.Type)
}
if got, isPod := lastEvent.Object.(*fakePod); !isPod {
t.Fatalf("expected a pod event, got %#v", got)
}
}
func TestUntilErrorCondition(t *testing.T) {
fw := watch.NewFake()
go func() {
var obj *fakePod
fw.Add(obj)
}()
expected := "something bad"
conditions := []ConditionFunc{
func(event watch.Event) (bool, error) { return event.Type == watch.Added, nil },
func(event watch.Event) (bool, error) { return false, errors.New(expected) },
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
_, err := UntilWithoutRetry(ctx, fw, conditions...)
if err == nil {
t.Fatal("expected an error")
}
if !strings.Contains(err.Error(), expected) {
t.Fatalf("expected %q in error string, got %q", expected, err.Error())
}
}
func TestUntilWithSync(t *testing.T) {
// FIXME: test preconditions
tt := []struct {
name string
lw *cache.ListWatch
preconditionFunc PreconditionFunc
conditionFunc ConditionFunc
expectedErr error
expectedEvent *watch.Event
}{
{
name: "doesn't wait for sync with no precondition",
lw: &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
select {}
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
select {}
},
},
preconditionFunc: nil,
conditionFunc: func(e watch.Event) (bool, error) {
return true, nil
},
expectedErr: errors.New("timed out waiting for the condition"),
expectedEvent: nil,
},
{
name: "waits indefinitely with precondition if it can't sync",
lw: &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
select {}
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
select {}
},
},
preconditionFunc: func(store cache.Store) (bool, error) {
return true, nil
},
conditionFunc: func(e watch.Event) (bool, error) {
return true, nil
},
expectedErr: errors.New("UntilWithSync: unable to sync caches: context deadline exceeded"),
expectedEvent: nil,
},
{
name: "precondition can stop the loop",
lw: func() *cache.ListWatch {
fakeclient := fakeclient.NewSimpleClientset(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "first"}})
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return fakeclient.CoreV1().Secrets("").List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fakeclient.CoreV1().Secrets("").Watch(options)
},
}
}(),
preconditionFunc: func(store cache.Store) (bool, error) {
_, exists, err := store.Get(&metav1.ObjectMeta{Namespace: "", Name: "first"})
if err != nil {
return true, err
}
if exists {
return true, nil
}
return false, nil
},
conditionFunc: func(e watch.Event) (bool, error) {
return true, errors.New("should never reach this")
},
expectedErr: nil,
expectedEvent: nil,
},
{
name: "precondition lets it proceed to regular condition",
lw: func() *cache.ListWatch {
fakeclient := fakeclient.NewSimpleClientset(&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "first"}})
return &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return fakeclient.CoreV1().Secrets("").List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return fakeclient.CoreV1().Secrets("").Watch(options)
},
}
}(),
preconditionFunc: func(store cache.Store) (bool, error) {
return false, nil
},
conditionFunc: func(e watch.Event) (bool, error) {
if e.Type == watch.Added {
return true, nil
}
panic("no other events are expected")
},
expectedErr: nil,
expectedEvent: &watch.Event{Type: watch.Added, Object: &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "first"}}},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
// The informer waits for caches to sync by polling in 100ms intervals,
// so the timeout needs to be reasonably higher than that
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer cancel()
event, err := UntilWithSync(ctx, tc.lw, &corev1.Secret{}, tc.preconditionFunc, tc.conditionFunc)
if !reflect.DeepEqual(err, tc.expectedErr) {
t.Errorf("expected error %#v, got %#v", tc.expectedErr, err)
}
if !reflect.DeepEqual(event, tc.expectedEvent) {
t.Errorf("expected event %#v, got %#v", tc.expectedEvent, event)
}
})
}
}