vendor update for CSI 0.3.0
4 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/BUILD (generated, vendored)
@@ -54,13 +54,12 @@ go_test(
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/core/install:go_default_library",
        "//pkg/controller/garbagecollector/metaonly:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -68,6 +67,7 @@ go_test(
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
161 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector.go (generated, vendored)
@@ -32,6 +32,7 @@ import (
    "k8s.io/apimachinery/pkg/types"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/dynamic"
@@ -59,10 +60,8 @@ const ResourceResyncTime time.Duration = 0
// ensures that the garbage collector operates with a graph that is at least as
// up to date as the notification is sent.
type GarbageCollector struct {
    restMapper resettableRESTMapper
    // clientPool uses the regular dynamicCodec. We need it to update
    // finalizers. It can be removed if we support patching finalizers.
    clientPool dynamic.ClientPool
    restMapper resettableRESTMapper
    dynamicClient dynamic.Interface
    // garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
    attemptToDelete workqueue.RateLimitingInterface
    // garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
@@ -76,8 +75,7 @@ type GarbageCollector struct {
}

func NewGarbageCollector(
    metaOnlyClientPool dynamic.ClientPool,
    clientPool dynamic.ClientPool,
    dynamicClient dynamic.Interface,
    mapper resettableRESTMapper,
    deletableResources map[schema.GroupVersionResource]struct{},
    ignoredResources map[schema.GroupResource]struct{},
@@ -88,17 +86,17 @@ func NewGarbageCollector(
    attemptToOrphan := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_attempt_to_orphan")
    absentOwnerCache := NewUIDCache(500)
    gc := &GarbageCollector{
        clientPool: clientPool,
        dynamicClient: dynamicClient,
        restMapper: mapper,
        attemptToDelete: attemptToDelete,
        attemptToOrphan: attemptToOrphan,
        absentOwnerCache: absentOwnerCache,
    }
    gb := &GraphBuilder{
        metaOnlyClientPool: metaOnlyClientPool,
        informersStarted: informersStarted,
        restMapper: mapper,
        graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
        dynamicClient: dynamicClient,
        informersStarted: informersStarted,
        restMapper: mapper,
        graphChanges: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "garbage_collector_graph_changes"),
        uidToNode: &concurrentUIDToNode{
            uidToNode: make(map[types.UID]*node),
        },
@ -166,51 +164,79 @@ type resettableRESTMapper interface {
|
||||
// Note that discoveryClient should NOT be shared with gc.restMapper, otherwise
|
||||
// the mapper's underlying discovery client will be unnecessarily reset during
|
||||
// the course of detecting new resources.
|
||||
func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, period time.Duration, stopCh <-chan struct{}) {
|
||||
func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterface, period time.Duration, stopCh <-chan struct{}) {
|
||||
oldResources := make(map[schema.GroupVersionResource]struct{})
|
||||
wait.Until(func() {
|
||||
// Get the current resource list from discovery.
|
||||
newResources := GetDeletableResources(discoveryClient)
|
||||
|
||||
// This can occur if there is an internal error in GetDeletableResources.
|
||||
if len(newResources) == 0 {
|
||||
glog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
|
||||
return
|
||||
}
|
||||
|
||||
// Decide whether discovery has reported a change.
|
||||
if reflect.DeepEqual(oldResources, newResources) {
|
||||
glog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
|
||||
return
|
||||
}
|
||||
|
||||
// Something has changed, time to sync.
|
||||
glog.V(2).Infof("syncing garbage collector with updated resources from discovery: %v", newResources)
|
||||
|
||||
// Ensure workers are paused to avoid processing events before informers
|
||||
// have resynced.
|
||||
gc.workerLock.Lock()
|
||||
defer gc.workerLock.Unlock()
|
||||
|
||||
// Resetting the REST mapper will also invalidate the underlying discovery
|
||||
// client. This is a leaky abstraction and assumes behavior about the REST
|
||||
// mapper, but we'll deal with it for now.
|
||||
gc.restMapper.Reset()
|
||||
// Once we get here, we should not unpause workers until we've successfully synced
|
||||
attempt := 0
|
||||
wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
|
||||
attempt++
|
||||
|
||||
// Perform the monitor resync and wait for controllers to report cache sync.
|
||||
//
|
||||
// NOTE: It's possible that newResources will diverge from the resources
|
||||
// discovered by restMapper during the call to Reset, since they are
|
||||
// distinct discovery clients invalidated at different times. For example,
|
||||
// newResources may contain resources not returned in the restMapper's
|
||||
// discovery call if the resources appeared in-between the calls. In that
|
||||
// case, the restMapper will fail to map some of newResources until the next
|
||||
// sync period.
|
||||
if err := gc.resyncMonitors(newResources); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err))
|
||||
return
|
||||
}
|
||||
// TODO: WaitForCacheSync can block forever during normal operation. Could
|
||||
// pass a timeout channel, but we have to consider the implications of
|
||||
// un-pausing the GC with a partially synced graph builder.
|
||||
if !controller.WaitForCacheSync("garbage collector", stopCh, gc.dependencyGraphBuilder.IsSynced) {
|
||||
utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync"))
|
||||
return
|
||||
}
|
||||
// On a reattempt, check if available resources have changed
|
||||
if attempt > 1 {
|
||||
newResources = GetDeletableResources(discoveryClient)
|
||||
if len(newResources) == 0 {
|
||||
glog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
glog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
|
||||
|
||||
// Resetting the REST mapper will also invalidate the underlying discovery
|
||||
// client. This is a leaky abstraction and assumes behavior about the REST
|
||||
// mapper, but we'll deal with it for now.
|
||||
gc.restMapper.Reset()
|
||||
glog.V(4).Infof("reset restmapper")
|
||||
|
||||
// Perform the monitor resync and wait for controllers to report cache sync.
|
||||
//
|
||||
// NOTE: It's possible that newResources will diverge from the resources
|
||||
// discovered by restMapper during the call to Reset, since they are
|
||||
// distinct discovery clients invalidated at different times. For example,
|
||||
// newResources may contain resources not returned in the restMapper's
|
||||
// discovery call if the resources appeared in-between the calls. In that
|
||||
// case, the restMapper will fail to map some of newResources until the next
|
||||
// attempt.
|
||||
if err := gc.resyncMonitors(newResources); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
|
||||
return false, nil
|
||||
}
|
||||
glog.V(4).Infof("resynced monitors")
|
||||
|
||||
// wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
|
||||
// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
|
||||
// informers keep attempting to sync in the background, so retrying doesn't interrupt them.
|
||||
// the call to resyncMonitors on the reattempt will no-op for resources that still exist.
|
||||
// note that workers stay paused until we successfully resync.
|
||||
if !controller.WaitForCacheSync("garbage collector", waitForStopOrTimeout(stopCh, period), gc.dependencyGraphBuilder.IsSynced) {
|
||||
utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync (attempt %d)", attempt))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// success, break out of the loop
|
||||
return true, nil
|
||||
}, stopCh)
|
||||
|
||||
// Finally, keep track of our new state. Do this after all preceding steps
|
||||
// have succeeded to ensure we'll retry on subsequent syncs if an error
|
||||
@ -220,6 +246,36 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.DiscoveryInterface, p
|
||||
}, period, stopCh)
|
||||
}
|
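The sketch below reduces the Sync loop above to its retry skeleton, to make the control flow easier to follow. It is illustrative only: resyncSucceeded is a hypothetical stand-in for the monitor resync and cache-sync steps; the wait.Until and wait.PollImmediateUntil calls themselves mirror the ones in the diff.

    wait.Until(func() {
        // Runs once per period until stopCh closes; in the real Sync the whole
        // body holds gc.workerLock so workers stay paused while resyncing.
        wait.PollImmediateUntil(100*time.Millisecond, func() (bool, error) {
            // Runs immediately and then every 100ms until it reports done (or
            // returns an error), or until stopCh closes.
            return resyncSucceeded(), nil // stand-in for resyncMonitors + WaitForCacheSync
        }, stopCh)
    }, period, stopCh)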
||||
|
||||
// printDiff returns a human-readable summary of what resources were added and removed
|
||||
func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
|
||||
removed := sets.NewString()
|
||||
for oldResource := range oldResources {
|
||||
if _, ok := newResources[oldResource]; !ok {
|
||||
removed.Insert(fmt.Sprintf("%+v", oldResource))
|
||||
}
|
||||
}
|
||||
added := sets.NewString()
|
||||
for newResource := range newResources {
|
||||
if _, ok := oldResources[newResource]; !ok {
|
||||
added.Insert(fmt.Sprintf("%+v", newResource))
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
|
||||
}
|
||||
|
||||
// waitForStopOrTimeout returns a stop channel that closes when the provided stop channel closes or when the specified timeout is reached
|
||||
func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
|
||||
stopChWithTimeout := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-stopCh:
|
||||
case <-time.After(timeout):
|
||||
}
|
||||
close(stopChWithTimeout)
|
||||
}()
|
||||
return stopChWithTimeout
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) IsSynced() bool {
|
||||
return gc.dependencyGraphBuilder.IsSynced()
|
||||
}
|
||||
@ -283,19 +339,15 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
|
||||
// ii) should update the object to remove such references. This is to
|
||||
// prevent objects having references to an old resource from being
|
||||
// deleted during a cluster upgrade.
|
||||
fqKind := schema.FromAPIVersionAndKind(reference.APIVersion, reference.Kind)
|
||||
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
resource, err := gc.apiResource(reference.APIVersion, reference.Kind)
|
||||
resource, namespaced, err := gc.apiResource(reference.APIVersion, reference.Kind)
|
||||
if err != nil {
|
||||
return false, nil, err
|
||||
}
|
||||
|
||||
// TODO: It's only necessary to talk to the API server if the owner node
|
||||
// is a "virtual" node. The local graph could lag behind the real
|
||||
// status, but in practice, the difference is small.
|
||||
owner, err = client.Resource(resource, item.identity.Namespace).Get(reference.Name, metav1.GetOptions{})
|
||||
owner, err = gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(reference.Name, metav1.GetOptions{})
|
||||
switch {
|
||||
case errors.IsNotFound(err):
|
||||
gc.absentOwnerCache.Add(reference.UID)
|
||||
@ -416,8 +468,11 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
|
||||
// waitingForDependentsDeletion needs to be deleted from the
|
||||
// ownerReferences, otherwise the referenced objects will be stuck with
|
||||
// the FinalizerDeletingDependents and never get deleted.
|
||||
patch := deleteOwnerRefPatch(item.identity.UID, append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)...)
|
||||
_, err = gc.patchObject(item.identity, patch)
|
||||
ownerUIDs := append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)
|
||||
patch := deleteOwnerRefStrategicMergePatch(item.identity.UID, ownerUIDs...)
|
||||
_, err = gc.patch(item, patch, func(n *node) ([]byte, error) {
|
||||
return gc.deleteOwnerRefJSONMergePatch(n, ownerUIDs...)
|
||||
})
|
||||
return err
|
||||
case len(waitingForDependentsDeletion) != 0 && item.dependentsLength() != 0:
|
||||
deps := item.getDependents()
|
||||
@ -429,11 +484,11 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
|
||||
// there are multiple workers run attemptToDeleteItem in
|
||||
// parallel, the circle detection can fail in a race condition.
|
||||
glog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
|
||||
patch, err := item.patchToUnblockOwnerReferences()
|
||||
patch, err := item.unblockOwnerReferencesStrategicMergePatch()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := gc.patchObject(item.identity, patch); err != nil {
|
||||
if _, err := gc.patch(item, patch, gc.unblockOwnerReferencesJSONMergePatch); err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
@ -493,8 +548,10 @@ func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents [
|
||||
go func(dependent *node) {
|
||||
defer wg.Done()
|
||||
// the dependent.identity.UID is used as precondition
|
||||
patch := deleteOwnerRefPatch(dependent.identity.UID, owner.UID)
|
||||
_, err := gc.patchObject(dependent.identity, patch)
|
||||
patch := deleteOwnerRefStrategicMergePatch(dependent.identity.UID, owner.UID)
|
||||
_, err := gc.patch(dependent, patch, func(n *node) ([]byte, error) {
|
||||
return gc.deleteOwnerRefJSONMergePatch(n, owner.UID)
|
||||
})
|
||||
// note that if the target ownerReference doesn't exist in the
|
||||
// dependent, strategic merge patch will NOT return an error.
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
|
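Across garbagecollector.go, operations.go and graph_builder.go this vendor bump replaces the deprecated dynamic.ClientPool (one client per GroupVersionKind, resources addressed via *metav1.APIResource) with a single dynamic.Interface addressed by GroupVersionResource. A minimal sketch of the new call style, based only on the calls visible in the diff; cfg is an assumed *rest.Config and error handling is abbreviated:

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/dynamic"
        "k8s.io/client-go/rest"
    )

    func getOwner(cfg *rest.Config, gvr schema.GroupVersionResource, namespace, name string) error {
        // One dynamic client serves every resource; no per-GVK client pool.
        dynamicClient, err := dynamic.NewForConfig(cfg)
        if err != nil {
            return err
        }
        // Cluster-scoped resources pass "" as the namespace, which is what the
        // new resourceDefaultNamespace helper in operations.go arranges.
        _, err = dynamicClient.Resource(gvr).Namespace(namespace).Get(name, metav1.GetOptions{})
        return err
    }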
242 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/garbagecollector_test.go (generated, vendored)
@ -24,6 +24,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
@ -31,9 +32,9 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -41,11 +42,11 @@ import (
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
|
||||
)
|
||||
|
||||
type testRESTMapper struct {
|
||||
@ -56,12 +57,13 @@ func (_ *testRESTMapper) Reset() {}
|
||||
|
||||
func TestGarbageCollectorConstruction(t *testing.T) {
|
||||
config := &restclient.Config{}
|
||||
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
|
||||
tweakableRM := meta.NewDefaultRESTMapper(nil, nil)
|
||||
rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, legacyscheme.Registry.RESTMapper()}}
|
||||
metaOnlyClientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc)
|
||||
config.ContentConfig.NegotiatedSerializer = nil
|
||||
clientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc)
|
||||
tweakableRM := meta.NewDefaultRESTMapper(nil)
|
||||
rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
|
||||
dynamicClient, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
podResource := map[schema.GroupVersionResource]struct{}{
|
||||
{Version: "v1", Resource: "pods"}: {},
|
||||
}
|
||||
@ -76,7 +78,7 @@ func TestGarbageCollectorConstruction(t *testing.T) {
|
||||
// construction will not fail.
|
||||
alwaysStarted := make(chan struct{})
|
||||
close(alwaysStarted)
|
||||
gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
|
||||
gc, err := NewGarbageCollector(dynamicClient, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -142,18 +144,34 @@ type fakeActionHandler struct {
|
||||
|
||||
// ServeHTTP logs the action that occurred and always returns the associated status code
|
||||
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
func() {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
|
||||
fakeResponse, ok := f.response[request.Method+request.URL.Path]
|
||||
if !ok {
|
||||
fakeResponse.statusCode = 200
|
||||
fakeResponse.content = []byte("{\"kind\": \"List\"}")
|
||||
f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
|
||||
fakeResponse, ok := f.response[request.Method+request.URL.Path]
|
||||
if !ok {
|
||||
fakeResponse.statusCode = 200
|
||||
fakeResponse.content = []byte("{\"kind\": \"List\"}")
|
||||
}
|
||||
response.Header().Set("Content-Type", "application/json")
|
||||
response.WriteHeader(fakeResponse.statusCode)
|
||||
response.Write(fakeResponse.content)
|
||||
}()
|
||||
|
||||
// This is to allow the fakeActionHandler to simulate a watch being opened
|
||||
if strings.Contains(request.URL.RawQuery, "watch=true") {
|
||||
hijacker, ok := response.(http.Hijacker)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
connection, _, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer connection.Close()
|
||||
time.Sleep(30 * time.Second)
|
||||
}
|
||||
response.Header().Set("Content-Type", "application/json")
|
||||
response.WriteHeader(fakeResponse.statusCode)
|
||||
response.Write(fakeResponse.content)
|
||||
}
|
||||
|
||||
// testServerAndClientConfig returns a server that listens and a config that can reference it
|
||||
@ -171,16 +189,17 @@ type garbageCollector struct {
|
||||
}
|
||||
|
||||
func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
|
||||
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
|
||||
metaOnlyClientPool := dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
config.ContentConfig.NegotiatedSerializer = nil
|
||||
clientPool := dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
dynamicClient, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
podResource := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
|
||||
client := fake.NewSimpleClientset()
|
||||
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
||||
alwaysStarted := make(chan struct{})
|
||||
close(alwaysStarted)
|
||||
gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, &testRESTMapper{legacyscheme.Registry.RESTMapper()}, podResource, ignoredResources, sharedInformers, alwaysStarted)
|
||||
gc, err := NewGarbageCollector(dynamicClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, podResource, ignoredResources, sharedInformers, alwaysStarted)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -415,13 +434,13 @@ func TestGCListWatcher(t *testing.T) {
|
||||
testHandler := &fakeActionHandler{}
|
||||
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
|
||||
defer srv.Close()
|
||||
clientPool := dynamic.NewClientPool(clientConfig, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
podResource := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
|
||||
client, err := clientPool.ClientForGroupVersionResource(podResource)
|
||||
dynamicClient, err := dynamic.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lw := listWatcher(client, podResource)
|
||||
|
||||
lw := listWatcher(dynamicClient, podResource)
|
||||
lw.DisableChunking = true
|
||||
if _, err := lw.Watch(metav1.ListOptions{ResourceVersion: "1"}); err != nil {
|
||||
t.Fatal(err)
|
||||
@ -574,7 +593,7 @@ func TestDeleteOwnerRefPatch(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
patch := deleteOwnerRefPatch("100", "2", "3")
|
||||
patch := deleteOwnerRefStrategicMergePatch("100", "2", "3")
|
||||
patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -619,7 +638,7 @@ func TestUnblockOwnerReference(t *testing.T) {
|
||||
n := node{
|
||||
owners: accessor.GetOwnerReferences(),
|
||||
}
|
||||
patch, err := n.patchToUnblockOwnerReferences()
|
||||
patch, err := n.unblockOwnerReferencesStrategicMergePatch()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -668,9 +687,13 @@ func TestOrphanDependentsFailure(t *testing.T) {
|
||||
},
|
||||
}
|
||||
err := gc.orphanDependents(objectReference{}, dependents)
|
||||
expected := `the server reported a conflict (patch pods pod)`
|
||||
expected := `the server reported a conflict`
|
||||
if err == nil || !strings.Contains(err.Error(), expected) {
|
||||
t.Errorf("expected error contains text %s, got %v", expected, err)
|
||||
if err != nil {
|
||||
t.Errorf("expected error contains text %q, got %q", expected, err.Error())
|
||||
} else {
|
||||
t.Errorf("expected error contains text %q, got nil", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -766,9 +789,145 @@ func TestGetDeletableResources(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestGarbageCollectorSync ensures that a discovery client error
|
||||
// will not cause the garbage collector to block infinitely.
|
||||
func TestGarbageCollectorSync(t *testing.T) {
|
||||
serverResources := []*metav1.APIResourceList{
|
||||
{
|
||||
GroupVersion: "v1",
|
||||
APIResources: []metav1.APIResource{
|
||||
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
unsyncableServerResources := []*metav1.APIResourceList{
|
||||
{
|
||||
GroupVersion: "v1",
|
||||
APIResources: []metav1.APIResource{
|
||||
{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
|
||||
{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeDiscoveryClient := &fakeServerResources{
|
||||
PreferredResources: serverResources,
|
||||
Error: nil,
|
||||
Lock: sync.Mutex{},
|
||||
InterfaceUsedCount: 0,
|
||||
}
|
||||
|
||||
testHandler := &fakeActionHandler{
|
||||
response: map[string]FakeResponse{
|
||||
"GET" + "/api/v1/pods": {
|
||||
200,
|
||||
[]byte("{}"),
|
||||
},
|
||||
"GET" + "/api/v1/secrets": {
|
||||
404,
|
||||
[]byte("{}"),
|
||||
},
|
||||
},
|
||||
}
|
||||
srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
|
||||
defer srv.Close()
|
||||
clientConfig.ContentConfig.NegotiatedSerializer = nil
|
||||
client, err := kubernetes.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
|
||||
dynamicClient, err := dynamic.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
podResource := map[schema.GroupVersionResource]struct{}{
|
||||
{Group: "", Version: "v1", Resource: "pods"}: {},
|
||||
}
|
||||
sharedInformers := informers.NewSharedInformerFactory(client, 0)
|
||||
alwaysStarted := make(chan struct{})
|
||||
close(alwaysStarted)
|
||||
gc, err := NewGarbageCollector(dynamicClient, rm, podResource, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
go gc.Run(1, stopCh)
|
||||
go gc.Sync(fakeDiscoveryClient, 10*time.Millisecond, stopCh)
|
||||
|
||||
// Wait until the sync discovers the initial resources
|
||||
fmt.Printf("Test output")
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
|
||||
}
|
||||
|
||||
// Simulate the discovery client returning an error
|
||||
fakeDiscoveryClient.setPreferredResources(nil)
|
||||
fakeDiscoveryClient.setError(fmt.Errorf("Error calling discoveryClient.ServerPreferredResources()"))
|
||||
|
||||
// Wait until sync discovers the change
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Remove the error from being returned and see if the garbage collector sync is still working
|
||||
fakeDiscoveryClient.setPreferredResources(serverResources)
|
||||
fakeDiscoveryClient.setError(nil)
|
||||
|
||||
err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
|
||||
}
|
||||
|
||||
// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
|
||||
fakeDiscoveryClient.setPreferredResources(unsyncableServerResources)
|
||||
fakeDiscoveryClient.setError(nil)
|
||||
|
||||
// Wait until sync discovers the change
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Put the resources back to normal and ensure garbage collector sync recovers
|
||||
fakeDiscoveryClient.setPreferredResources(serverResources)
|
||||
fakeDiscoveryClient.setError(nil)
|
||||
|
||||
err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
|
||||
before := fakeDiscoveryClient.getInterfaceUsedCount()
|
||||
t := 1 * time.Second
|
||||
time.Sleep(t)
|
||||
after := fakeDiscoveryClient.getInterfaceUsedCount()
|
||||
if before == after {
|
||||
return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
|
||||
}
|
||||
|
||||
workerLockAcquired := make(chan struct{})
|
||||
go func() {
|
||||
workerLock.Lock()
|
||||
workerLock.Unlock()
|
||||
close(workerLockAcquired)
|
||||
}()
|
||||
select {
|
||||
case <-workerLockAcquired:
|
||||
return nil
|
||||
case <-time.After(t):
|
||||
return fmt.Errorf("workerLock blocked for at least %v", t)
|
||||
}
|
||||
}
|
||||
|
||||
type fakeServerResources struct {
|
||||
PreferredResources []*metav1.APIResourceList
|
||||
Error error
|
||||
Lock sync.Mutex
|
||||
InterfaceUsedCount int
|
||||
}
|
||||
|
||||
func (_ *fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
|
||||
@ -780,9 +939,30 @@ func (_ *fakeServerResources) ServerResources() ([]*metav1.APIResourceList, erro
|
||||
}
|
||||
|
||||
func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.InterfaceUsedCount++
|
||||
return f.PreferredResources, f.Error
|
||||
}
|
||||
|
||||
func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList) {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.PreferredResources = resources
|
||||
}
|
||||
|
||||
func (f *fakeServerResources) setError(err error) {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
f.Error = err
|
||||
}
|
||||
|
||||
func (f *fakeServerResources) getInterfaceUsedCount() int {
|
||||
f.Lock.Lock()
|
||||
defer f.Lock.Unlock()
|
||||
return f.InterfaceUsedCount
|
||||
}
|
||||
|
||||
func (_ *fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
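The test changes above capture the matching constructor change: NewGarbageCollector no longer takes the two client pools, only the dynamic client plus the REST mapper. A compact wiring sketch modeled on setupGC, assumed to live in the garbagecollector package (cfg and restMapper are assumed inputs; ignoredResources is the package-level default from graph_builder.go):

    func newGCForTest(cfg *rest.Config, restMapper resettableRESTMapper) (*GarbageCollector, error) {
        dynamicClient, err := dynamic.NewForConfig(cfg)
        if err != nil {
            return nil, err
        }
        deletable := map[schema.GroupVersionResource]struct{}{{Version: "v1", Resource: "pods"}: {}}
        sharedInformers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
        informersStarted := make(chan struct{})
        close(informersStarted) // informers treated as already started, as in the tests above
        return NewGarbageCollector(dynamicClient, restMapper, deletable, ignoredResources, sharedInformers, informersStarted)
    }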
51 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/graph_builder.go (generated, vendored)
@@ -78,7 +78,7 @@ type GraphBuilder struct {
    // each monitor list/watches a resource, the results are funneled to the
    // dependencyGraphBuilder
    monitors monitors
    monitorLock sync.Mutex
    monitorLock sync.RWMutex
    // informersStarted is closed after after all of the controllers have been initialized and are running.
    // After that it is safe to start them here, before that it is not.
    informersStarted <-chan struct{}
@@ -91,9 +91,7 @@ type GraphBuilder struct {
    // it is protected by monitorLock.
    running bool

    // metaOnlyClientPool uses a special codec, which removes fields except for
    // apiVersion, kind, and metadata during decoding.
    metaOnlyClientPool dynamic.ClientPool
    dynamicClient dynamic.Interface
    // monitors are the producer of the graphChanges queue, graphBuilder alters
    // the in-memory graph according to the changes.
    graphChanges workqueue.RateLimitingInterface
@ -113,6 +111,7 @@ type GraphBuilder struct {
|
||||
// monitor runs a Controller with a local stop channel.
|
||||
type monitor struct {
|
||||
controller cache.Controller
|
||||
store cache.Store
|
||||
|
||||
// stopCh stops Controller. If stopCh is nil, the monitor is considered to be
|
||||
// not yet started.
|
||||
@ -130,29 +129,17 @@ type monitors map[schema.GroupVersionResource]*monitor
|
||||
func listWatcher(client dynamic.Interface, resource schema.GroupVersionResource) *cache.ListWatch {
|
||||
return &cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
// APIResource.Kind is not used by the dynamic client, so
|
||||
// leave it empty. We want to list this resource in all
|
||||
// namespaces if it's namespace scoped, so leave
|
||||
// APIResource.Namespaced as false is all right.
|
||||
apiResource := metav1.APIResource{Name: resource.Resource}
|
||||
return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
|
||||
Resource(&apiResource, metav1.NamespaceAll).
|
||||
List(options)
|
||||
// We want to list this resource in all namespaces if it's namespace scoped, so not passing namespace is ok.
|
||||
return client.Resource(resource).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
// APIResource.Kind is not used by the dynamic client, so
|
||||
// leave it empty. We want to list this resource in all
|
||||
// namespaces if it's namespace scoped, so leave
|
||||
// APIResource.Namespaced as false is all right.
|
||||
apiResource := metav1.APIResource{Name: resource.Resource}
|
||||
return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
|
||||
Resource(&apiResource, metav1.NamespaceAll).
|
||||
Watch(options)
|
||||
// We want to list this resource in all namespaces if it's namespace scoped, so not passing namespace is ok.
|
||||
return client.Resource(resource).Watch(options)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, error) {
|
||||
func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind schema.GroupVersionKind) (cache.Controller, cache.Store, error) {
|
||||
handlers := cache.ResourceEventHandlerFuncs{
|
||||
// add the event to the dependencyGraphBuilder's graphChanges.
|
||||
AddFunc: func(obj interface{}) {
|
||||
@ -192,25 +179,21 @@ func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind
|
||||
glog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
|
||||
// need to clone because it's from a shared cache
|
||||
shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
|
||||
return shared.Informer().GetController(), nil
|
||||
return shared.Informer().GetController(), shared.Informer().GetStore(), nil
|
||||
} else {
|
||||
glog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
|
||||
}
|
||||
|
||||
// TODO: consider store in one storage.
|
||||
glog.V(5).Infof("create storage for resource %s", resource)
|
||||
client, err := gb.metaOnlyClientPool.ClientForGroupVersionKind(kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, monitor := cache.NewInformer(
|
||||
listWatcher(client, resource),
|
||||
store, monitor := cache.NewInformer(
|
||||
listWatcher(gb.dynamicClient, resource),
|
||||
nil,
|
||||
ResourceResyncTime,
|
||||
// don't need to clone because it's not from shared cache
|
||||
handlers,
|
||||
)
|
||||
return monitor, nil
|
||||
return monitor, store, nil
|
||||
}
|
||||
|
||||
// syncMonitors rebuilds the monitor set according to the supplied resources,
|
||||
@ -246,12 +229,12 @@ func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]s
|
||||
errs = append(errs, fmt.Errorf("couldn't look up resource %q: %v", resource, err))
|
||||
continue
|
||||
}
|
||||
c, err := gb.controllerFor(resource, kind)
|
||||
c, s, err := gb.controllerFor(resource, kind)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("couldn't start monitor for resource %q: %v", resource, err))
|
||||
continue
|
||||
}
|
||||
current[resource] = &monitor{controller: c}
|
||||
current[resource] = &monitor{store: s, controller: c}
|
||||
added++
|
||||
}
|
||||
gb.monitors = current
|
||||
@ -306,11 +289,13 @@ func (gb *GraphBuilder) IsSynced() bool {
|
||||
defer gb.monitorLock.Unlock()
|
||||
|
||||
if len(gb.monitors) == 0 {
|
||||
glog.V(4).Info("garbage controller monitor not synced: no monitors")
|
||||
return false
|
||||
}
|
||||
|
||||
for _, monitor := range gb.monitors {
|
||||
for resource, monitor := range gb.monitors {
|
||||
if !monitor.controller.HasSynced() {
|
||||
glog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -361,8 +346,6 @@ var ignoredResources = map[schema.GroupResource]struct{}{
    {Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}: {},
    {Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: {},
    {Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}: {},
    {Group: "apiregistration.k8s.io", Resource: "apiservices"}: {},
    {Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}: {},
}

// DefaultIgnoredResources returns the default set of resources that the garbage collector controller
20 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/BUILD (generated, vendored)
@ -3,38 +3,18 @@ package(default_visibility = ["//visibility:public"])
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"metaonly.go",
|
||||
"types.go",
|
||||
"zz_generated.deepcopy.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly",
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["metaonly_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core/install:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
66 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly.go (generated, vendored)
@ -1,66 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metaonly
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
)
|
||||
|
||||
type metaOnlyJSONScheme struct{}
|
||||
|
||||
// This function can be extended to mapping different gvk to different MetadataOnlyObject,
|
||||
// which embedded with different version of ObjectMeta. Currently the system
|
||||
// only supports metav1.ObjectMeta.
|
||||
func gvkToMetadataOnlyObject(gvk schema.GroupVersionKind) runtime.Object {
|
||||
if strings.HasSuffix(gvk.Kind, "List") {
|
||||
return &MetadataOnlyObjectList{}
|
||||
} else {
|
||||
return &MetadataOnlyObject{}
|
||||
}
|
||||
}
|
||||
|
||||
func NewMetadataCodecFactory() serializer.CodecFactory {
|
||||
// populating another scheme from legacyscheme.Scheme, registering every kind with
|
||||
// MetadataOnlyObject (or MetadataOnlyObjectList).
|
||||
scheme := runtime.NewScheme()
|
||||
allTypes := legacyscheme.Scheme.AllKnownTypes()
|
||||
for kind := range allTypes {
|
||||
if kind.Version == runtime.APIVersionInternal {
|
||||
continue
|
||||
}
|
||||
if kind == metav1.Unversioned.WithKind("Status") {
|
||||
// this is added below as unversioned
|
||||
continue
|
||||
}
|
||||
metaOnlyObject := gvkToMetadataOnlyObject(kind)
|
||||
scheme.AddKnownTypeWithName(kind, metaOnlyObject)
|
||||
}
|
||||
scheme.AddUnversionedTypes(metav1.Unversioned, &metav1.Status{})
|
||||
return serializer.NewCodecFactory(scheme)
|
||||
}
|
||||
|
||||
// String converts a MetadataOnlyObject to a human-readable string.
|
||||
func (metaOnly MetadataOnlyObject) String() string {
|
||||
return fmt.Sprintf("%s/%s, name: %s, DeletionTimestamp:%v", metaOnly.TypeMeta.APIVersion, metaOnly.TypeMeta.Kind, metaOnly.ObjectMeta.Name, metaOnly.ObjectMeta.DeletionTimestamp)
|
||||
}
|
164 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly/metaonly_test.go (generated, vendored)
@ -1,164 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metaonly
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
_ "k8s.io/kubernetes/pkg/apis/core/install"
|
||||
)
|
||||
|
||||
func getPod() *v1.Pod {
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod",
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
{UID: "1234"},
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "fake-name",
|
||||
Image: "fakeimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getPodJson(t *testing.T) []byte {
|
||||
data, err := json.Marshal(getPod())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func getPodListJson(t *testing.T) []byte {
|
||||
data, err := json.Marshal(&v1.PodList{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PodList",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
Items: []v1.Pod{
|
||||
*getPod(),
|
||||
*getPod(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func verfiyMetadata(description string, t *testing.T, in *MetadataOnlyObject) {
|
||||
pod := getPod()
|
||||
if e, a := pod.ObjectMeta, in.ObjectMeta; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("%s: expected %#v, got %#v", description, e, a)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeToMetadataOnlyObject(t *testing.T) {
|
||||
data := getPodJson(t)
|
||||
cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
|
||||
info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
|
||||
if !ok {
|
||||
t.Fatalf("expected to get a JSON serializer")
|
||||
}
|
||||
codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
|
||||
// decode with into
|
||||
into := &MetadataOnlyObject{}
|
||||
ret, _, err := codec.Decode(data, nil, into)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
metaOnly, ok := ret.(*MetadataOnlyObject)
|
||||
if !ok {
|
||||
t.Fatalf("expected ret to be *runtime.MetadataOnlyObject")
|
||||
}
|
||||
verfiyMetadata("check returned metaonly with into", t, metaOnly)
|
||||
verfiyMetadata("check into", t, into)
|
||||
// decode without into
|
||||
ret, _, err = codec.Decode(data, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
metaOnly, ok = ret.(*MetadataOnlyObject)
|
||||
if !ok {
|
||||
t.Fatalf("expected ret to be *runtime.MetadataOnlyObject")
|
||||
}
|
||||
verfiyMetadata("check returned metaonly without into", t, metaOnly)
|
||||
}
|
||||
|
||||
func verifyListMetadata(t *testing.T, metaOnlyList *MetadataOnlyObjectList) {
|
||||
items, err := meta.ExtractList(metaOnlyList)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, item := range items {
|
||||
metaOnly, ok := item.(*MetadataOnlyObject)
|
||||
if !ok {
|
||||
t.Fatalf("expected item to be *MetadataOnlyObject")
|
||||
}
|
||||
verfiyMetadata("check list", t, metaOnly)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeToMetadataOnlyObjectList(t *testing.T) {
|
||||
data := getPodListJson(t)
|
||||
cf := serializer.DirectCodecFactory{CodecFactory: NewMetadataCodecFactory()}
|
||||
info, ok := runtime.SerializerInfoForMediaType(cf.SupportedMediaTypes(), runtime.ContentTypeJSON)
|
||||
if !ok {
|
||||
t.Fatalf("expected to get a JSON serializer")
|
||||
}
|
||||
codec := cf.DecoderToVersion(info.Serializer, schema.GroupVersion{Group: "SOMEGROUP", Version: "SOMEVERSION"})
|
||||
// decode with into
|
||||
into := &MetadataOnlyObjectList{}
|
||||
ret, _, err := codec.Decode(data, nil, into)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
metaOnlyList, ok := ret.(*MetadataOnlyObjectList)
|
||||
if !ok {
|
||||
t.Fatalf("expected ret to be *runtime.UnstructuredList")
|
||||
}
|
||||
verifyListMetadata(t, metaOnlyList)
|
||||
verifyListMetadata(t, into)
|
||||
// decode without into
|
||||
ret, _, err = codec.Decode(data, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
metaOnlyList, ok = ret.(*MetadataOnlyObjectList)
|
||||
if !ok {
|
||||
t.Fatalf("expected ret to be *runtime.UnstructuredList")
|
||||
}
|
||||
verifyListMetadata(t, metaOnlyList)
|
||||
}
|
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
58 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/operations.go (generated, vendored)
@ -30,76 +30,58 @@ import (
|
||||
"k8s.io/client-go/util/retry"
|
||||
)
|
||||
|
||||
// cluster scoped resources don't have namespaces. Default to the item's namespace, but clear it for cluster scoped resources
|
||||
func resourceDefaultNamespace(namespaced bool, defaultNamespace string) string {
|
||||
if namespaced {
|
||||
return defaultNamespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// apiResource consults the REST mapper to translate an <apiVersion, kind,
|
||||
// namespace> tuple to a unversioned.APIResource struct.
|
||||
func (gc *GarbageCollector) apiResource(apiVersion, kind string) (*metav1.APIResource, error) {
|
||||
func (gc *GarbageCollector) apiResource(apiVersion, kind string) (schema.GroupVersionResource, bool, error) {
|
||||
fqKind := schema.FromAPIVersionAndKind(apiVersion, kind)
|
||||
mapping, err := gc.restMapper.RESTMapping(fqKind.GroupKind(), fqKind.Version)
|
||||
if err != nil {
|
||||
return nil, newRESTMappingError(kind, apiVersion)
|
||||
return schema.GroupVersionResource{}, false, newRESTMappingError(kind, apiVersion)
|
||||
}
|
||||
glog.V(5).Infof("map kind %s, version %s to resource %s", kind, apiVersion, mapping.Resource)
|
||||
resource := metav1.APIResource{
|
||||
Name: mapping.Resource,
|
||||
Namespaced: mapping.Scope == meta.RESTScopeNamespace,
|
||||
Kind: kind,
|
||||
}
|
||||
return &resource, nil
|
||||
return mapping.Resource, mapping.Scope == meta.RESTScopeNamespace, nil
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) deleteObject(item objectReference, policy *metav1.DeletionPropagation) error {
|
||||
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
|
||||
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resource, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uid := item.UID
|
||||
preconditions := metav1.Preconditions{UID: &uid}
|
||||
deleteOptions := metav1.DeleteOptions{Preconditions: &preconditions, PropagationPolicy: policy}
|
||||
return client.Resource(resource, item.Namespace).Delete(item.Name, &deleteOptions)
|
||||
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Delete(item.Name, &deleteOptions)
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) getObject(item objectReference) (*unstructured.Unstructured, error) {
|
||||
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
|
||||
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
|
||||
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resource, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.Resource(resource, item.Namespace).Get(item.Name, metav1.GetOptions{})
|
||||
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Get(item.Name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) updateObject(item objectReference, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
|
||||
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
|
||||
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
|
||||
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resource, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.Resource(resource, item.Namespace).Update(obj)
|
||||
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Update(obj)
|
||||
}
|
||||
|
||||
func (gc *GarbageCollector) patchObject(item objectReference, patch []byte) (*unstructured.Unstructured, error) {
|
||||
fqKind := schema.FromAPIVersionAndKind(item.APIVersion, item.Kind)
|
||||
client, err := gc.clientPool.ClientForGroupVersionKind(fqKind)
|
||||
func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*unstructured.Unstructured, error) {
|
||||
resource, namespaced, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resource, err := gc.apiResource(item.APIVersion, item.Kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.Resource(resource, item.Namespace).Patch(item.Name, types.StrategicMergePatchType, patch)
|
||||
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch)
|
||||
}
|
||||
|
||||
// TODO: Using Patch when strategicmerge supports deleting an entry from a
|
||||
|
117 vendor/k8s.io/kubernetes/pkg/controller/garbagecollector/patch.go (generated, vendored)
@ -21,12 +21,16 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
|
||||
)
|
||||
|
||||
func deleteOwnerRefPatch(dependentUID types.UID, ownerUIDs ...types.UID) []byte {
|
||||
func deleteOwnerRefStrategicMergePatch(dependentUID types.UID, ownerUIDs ...types.UID) []byte {
|
||||
var pieces []string
|
||||
for _, ownerUID := range ownerUIDs {
|
||||
pieces = append(pieces, fmt.Sprintf(`{"$patch":"delete","uid":"%s"}`, ownerUID))
|
||||
@ -35,9 +39,97 @@ func deleteOwnerRefPatch(dependentUID types.UID, ownerUIDs ...types.UID) []byte
|
||||
return []byte(patch)
|
||||
}
|
||||
|
||||
// generate a patch that unsets the BlockOwnerDeletion field of all
|
||||
// getMetadata tries getting object metadata from local cache, and sends GET request to apiserver when
|
||||
// local cache is not available or not latest.
|
||||
func (gc *GarbageCollector) getMetadata(apiVersion, kind, namespace, name string) (metav1.Object, error) {
|
||||
apiResource, _, err := gc.apiResource(apiVersion, kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gc.dependencyGraphBuilder.monitorLock.RLock()
|
||||
defer gc.dependencyGraphBuilder.monitorLock.RUnlock()
|
||||
m, ok := gc.dependencyGraphBuilder.monitors[apiResource]
|
||||
if !ok || m == nil {
|
||||
// If local cache doesn't exist for mapping.Resource, send a GET request to API server
|
||||
return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
key := name
|
||||
if len(namespace) != 0 {
|
||||
key = namespace + "/" + name
|
||||
}
|
||||
raw, exist, err := m.store.GetByKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exist {
|
||||
// If local cache doesn't contain the object, send a GET request to API server
|
||||
return gc.dynamicClient.Resource(apiResource).Namespace(namespace).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
obj, ok := raw.(runtime.Object)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expect a runtime.Object, got %v", raw)
|
||||
}
|
||||
return meta.Accessor(obj)
|
||||
}
|
||||
|
||||
type objectForPatch struct {
|
||||
ObjectMetaForPatch `json:"metadata"`
|
||||
}
|
||||
|
||||
type ObjectMetaForPatch struct {
|
||||
ResourceVersion string `json:"resourceVersion"`
|
||||
OwnerReferences []metav1.OwnerReference `json:"ownerReferences"`
|
||||
}
|
||||
|
||||
// jsonMergePatchFunc defines the interface for functions that construct json merge patches that manipulate
|
||||
// owner reference array.
|
||||
type jsonMergePatchFunc func(*node) ([]byte, error)
|
||||
|
||||
// patch tries strategic merge patch on item first, and if SMP is not supported, it fallbacks to JSON merge
|
||||
// patch.
|
||||
func (gc *GarbageCollector) patch(item *node, smp []byte, jmp jsonMergePatchFunc) (*unstructured.Unstructured, error) {
|
||||
smpResult, err := gc.patchObject(item.identity, smp, types.StrategicMergePatchType)
|
||||
if err == nil {
|
||||
return smpResult, nil
|
||||
}
|
||||
if !errors.IsUnsupportedMediaType(err) {
|
||||
return nil, err
|
||||
}
|
||||
// StrategicMergePatch is not supported, use JSON merge patch instead
|
||||
patch, err := jmp(item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gc.patchObject(item.identity, patch, types.MergePatchType)
|
||||
}
|
||||
|
||||
// Returns JSON merge patch that removes the ownerReferences matching ownerUIDs.
|
||||
func (gc *GarbageCollector) deleteOwnerRefJSONMergePatch(item *node, ownerUIDs ...types.UID) ([]byte, error) {
|
||||
accessor, err := gc.getMetadata(item.identity.APIVersion, item.identity.Kind, item.identity.Namespace, item.identity.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
expectedObjectMeta := ObjectMetaForPatch{}
|
||||
expectedObjectMeta.ResourceVersion = accessor.GetResourceVersion()
|
||||
refs := accessor.GetOwnerReferences()
|
||||
for _, ref := range refs {
|
||||
var skip bool
|
||||
for _, ownerUID := range ownerUIDs {
|
||||
if ref.UID == ownerUID {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !skip {
|
||||
expectedObjectMeta.OwnerReferences = append(expectedObjectMeta.OwnerReferences, ref)
|
||||
}
|
||||
}
|
||||
return json.Marshal(objectForPatch{expectedObjectMeta})
|
||||
}
|
||||
|
||||
// Generate a patch that unsets the BlockOwnerDeletion field of all
|
||||
// ownerReferences of node.
|
||||
func (n *node) patchToUnblockOwnerReferences() ([]byte, error) {
|
||||
func (n *node) unblockOwnerReferencesStrategicMergePatch() ([]byte, error) {
|
||||
var dummy metaonly.MetadataOnlyObject
|
||||
var blockingRefs []metav1.OwnerReference
|
||||
falseVar := false
|
||||
@ -52,3 +144,22 @@ func (n *node) patchToUnblockOwnerReferences() ([]byte, error) {
|
||||
dummy.ObjectMeta.UID = n.identity.UID
|
||||
return json.Marshal(dummy)
|
||||
}
|
||||
|
||||
// Generate a JSON merge patch that unsets the BlockOwnerDeletion field of all
|
||||
// ownerReferences of node.
|
||||
func (gc *GarbageCollector) unblockOwnerReferencesJSONMergePatch(n *node) ([]byte, error) {
|
||||
accessor, err := gc.getMetadata(n.identity.APIVersion, n.identity.Kind, n.identity.Namespace, n.identity.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
expectedObjectMeta := ObjectMetaForPatch{}
|
||||
expectedObjectMeta.ResourceVersion = accessor.GetResourceVersion()
|
||||
var expectedOwners []metav1.OwnerReference
|
||||
falseVar := false
|
||||
for _, owner := range n.owners {
|
||||
owner.BlockOwnerDeletion = &falseVar
|
||||
expectedOwners = append(expectedOwners, owner)
|
||||
}
|
||||
expectedObjectMeta.OwnerReferences = expectedOwners
|
||||
return json.Marshal(objectForPatch{expectedObjectMeta})
|
||||
}
|
||||
|
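The fallback added in patch.go matters because some resource types (notably custom resources at the time) reject strategic merge patch, which surfaces as the errors.IsUnsupportedMediaType check above. For orientation, the two payload shapes are roughly as follows; this is a hedged reconstruction from the helpers in the diff (the exact wrapper in deleteOwnerRefStrategicMergePatch is partially elided), not verbatim output:

    strategic merge patch: {"metadata":{"ownerReferences":[{"$patch":"delete","uid":"OWNER_UID"}],"uid":"DEPENDENT_UID"}}
    JSON merge patch:      {"metadata":{"resourceVersion":"RV","ownerReferences":[REMAINING_OWNER_REFS]}}

The strategic variant deletes matching entries in place, keyed by uid and guarded by the dependent's uid; the JSON merge variant replaces the whole ownerReferences array with the owners that should remain, using resourceVersion as an optimistic-concurrency guard (see ObjectMetaForPatch above).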