Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -9,6 +9,7 @@ load(
go_library(
name = "go_default_library",
srcs = [
"dump.go",
"errors.go",
"garbagecollector.go",
"graph.go",
@ -23,54 +24,64 @@ go_library(
"//pkg/controller/garbagecollector/metaonly:go_default_library",
"//pkg/util/reflector/prometheus:go_default_library",
"//pkg/util/workqueue/prometheus:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/golang/groupcache/lru:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/gonum.org/v1/gonum/graph:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/encoding:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/encoding/dot:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/simple:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["garbagecollector_test.go"],
srcs = [
"dump_test.go",
"garbagecollector_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/gonum.org/v1/gonum/graph:go_default_library",
"//vendor/gonum.org/v1/gonum/graph/simple:go_default_library",
],
)

View File

@ -6,3 +6,5 @@ reviewers:
- caesarxuchao
- lavalamp
- deads2k
labels:
- sig/api-machinery

View File

@ -0,0 +1,278 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"fmt"
"net/http"
"strings"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/encoding"
"gonum.org/v1/gonum/graph/encoding/dot"
"gonum.org/v1/gonum/graph/simple"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
type gonumVertex struct {
uid types.UID
gvk schema.GroupVersionKind
namespace string
name string
missingFromGraph bool
beingDeleted bool
deletingDependents bool
virtual bool
vertexID int64
}
func (v *gonumVertex) ID() int64 {
return v.vertexID
}
func (v *gonumVertex) String() string {
kind := v.gvk.Kind + "." + v.gvk.Version
if len(v.gvk.Group) > 0 {
kind = kind + "." + v.gvk.Group
}
missing := ""
if v.missingFromGraph {
missing = "(missing)"
}
deleting := ""
if v.beingDeleted {
deleting = "(deleting)"
}
deletingDependents := ""
if v.deletingDependents {
deleting = "(deletingDependents)"
}
virtual := ""
if v.virtual {
virtual = "(virtual)"
}
return fmt.Sprintf(`%s/%s[%s]-%v%s%s%s%s`, kind, v.name, v.namespace, v.uid, missing, deleting, deletingDependents, virtual)
}
func (v *gonumVertex) Attributes() []encoding.Attribute {
kubectlString := v.gvk.Kind + "." + v.gvk.Version
if len(v.gvk.Group) > 0 {
kubectlString = kubectlString + "." + v.gvk.Group
}
kubectlString = kubectlString + "/" + v.name
label := fmt.Sprintf(`uid=%v
namespace=%v
%v
`,
v.uid,
v.namespace,
kubectlString,
)
conditionStrings := []string{}
if v.beingDeleted {
conditionStrings = append(conditionStrings, "beingDeleted")
}
if v.deletingDependents {
conditionStrings = append(conditionStrings, "deletingDependents")
}
if v.virtual {
conditionStrings = append(conditionStrings, "virtual")
}
if v.missingFromGraph {
conditionStrings = append(conditionStrings, "missingFromGraph")
}
conditionString := strings.Join(conditionStrings, ",")
if len(conditionString) > 0 {
label = label + conditionString + "\n"
}
return []encoding.Attribute{
{Key: "label", Value: fmt.Sprintf(`"%v"`, label)},
// these place metadata in the correct location, but don't conform to any normal attribute for rendering
{Key: "group", Value: fmt.Sprintf(`"%v"`, v.gvk.Group)},
{Key: "version", Value: fmt.Sprintf(`"%v"`, v.gvk.Version)},
{Key: "kind", Value: fmt.Sprintf(`"%v"`, v.gvk.Kind)},
{Key: "namespace", Value: fmt.Sprintf(`"%v"`, v.namespace)},
{Key: "name", Value: fmt.Sprintf(`"%v"`, v.name)},
{Key: "uid", Value: fmt.Sprintf(`"%v"`, v.uid)},
{Key: "missing", Value: fmt.Sprintf(`"%v"`, v.missingFromGraph)},
{Key: "beingDeleted", Value: fmt.Sprintf(`"%v"`, v.beingDeleted)},
{Key: "deletingDependents", Value: fmt.Sprintf(`"%v"`, v.deletingDependents)},
{Key: "virtual", Value: fmt.Sprintf(`"%v"`, v.virtual)},
}
}
func NewGonumVertex(node *node, nodeID int64) *gonumVertex {
gv, err := schema.ParseGroupVersion(node.identity.APIVersion)
if err != nil {
// this indicates a bad data serialization that should be prevented during storage of the API
utilruntime.HandleError(err)
}
return &gonumVertex{
uid: node.identity.UID,
gvk: gv.WithKind(node.identity.Kind),
namespace: node.identity.Namespace,
name: node.identity.Name,
beingDeleted: node.beingDeleted,
deletingDependents: node.deletingDependents,
virtual: node.virtual,
vertexID: nodeID,
}
}
func NewMissingGonumVertex(ownerRef metav1.OwnerReference, nodeID int64) *gonumVertex {
gv, err := schema.ParseGroupVersion(ownerRef.APIVersion)
if err != nil {
// this indicates a bad data serialization that should be prevented during storage of the API
utilruntime.HandleError(err)
}
return &gonumVertex{
uid: ownerRef.UID,
gvk: gv.WithKind(ownerRef.Kind),
name: ownerRef.Name,
missingFromGraph: true,
vertexID: nodeID,
}
}
func (m *concurrentUIDToNode) ToGonumGraph() graph.Directed {
m.uidToNodeLock.Lock()
defer m.uidToNodeLock.Unlock()
return toGonumGraph(m.uidToNode)
}
func toGonumGraph(uidToNode map[types.UID]*node) graph.Directed {
uidToVertex := map[types.UID]*gonumVertex{}
graphBuilder := simple.NewDirectedGraph()
// add the vertices first, then edges. That avoids having to deal with missing refs.
for _, node := range uidToNode {
// skip adding objects that don't have owner references and aren't referred to.
if len(node.dependents) == 0 && len(node.owners) == 0 {
continue
}
vertex := NewGonumVertex(node, graphBuilder.NewNode().ID())
uidToVertex[node.identity.UID] = vertex
graphBuilder.AddNode(vertex)
}
for _, node := range uidToNode {
currVertex := uidToVertex[node.identity.UID]
for _, ownerRef := range node.owners {
currOwnerVertex, ok := uidToVertex[ownerRef.UID]
if !ok {
currOwnerVertex = NewMissingGonumVertex(ownerRef, graphBuilder.NewNode().ID())
uidToVertex[ownerRef.UID] = currOwnerVertex
graphBuilder.AddNode(currOwnerVertex)
}
graphBuilder.SetEdge(simple.Edge{
F: currVertex,
T: currOwnerVertex,
})
}
}
return graphBuilder
}
func (m *concurrentUIDToNode) ToGonumGraphForObj(uids ...types.UID) graph.Directed {
m.uidToNodeLock.Lock()
defer m.uidToNodeLock.Unlock()
return toGonumGraphForObj(m.uidToNode, uids...)
}
func toGonumGraphForObj(uidToNode map[types.UID]*node, uids ...types.UID) graph.Directed {
uidsToCheck := append([]types.UID{}, uids...)
interestingNodes := map[types.UID]*node{}
// build the set of nodes to inspect first, then use the normal construction on the subset
for i := 0; i < len(uidsToCheck); i++ {
uid := uidsToCheck[i]
// if we've already been observed, there was a bug, but skip it so we don't loop forever
if _, ok := interestingNodes[uid]; ok {
continue
}
node, ok := uidToNode[uid]
// if there is no node for the UID, skip over it. We may add it to the list multiple times
// but we won't loop forever and hopefully the condition doesn't happen very often
if !ok {
continue
}
interestingNodes[node.identity.UID] = node
for _, ownerRef := range node.owners {
// if we've already inspected this UID, don't add it to be inspected again
if _, ok := interestingNodes[ownerRef.UID]; ok {
continue
}
uidsToCheck = append(uidsToCheck, ownerRef.UID)
}
for dependent := range node.dependents {
// if we've already inspected this UID, don't add it to be inspected again
if _, ok := interestingNodes[dependent.identity.UID]; ok {
continue
}
uidsToCheck = append(uidsToCheck, dependent.identity.UID)
}
}
return toGonumGraph(interestingNodes)
}
func NewDebugHandler(controller *GarbageCollector) http.Handler {
return &debugHTTPHandler{controller: controller}
}
type debugHTTPHandler struct {
controller *GarbageCollector
}
func (h *debugHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if req.URL.Path != "/graph" {
http.Error(w, "", http.StatusNotFound)
return
}
var graph graph.Directed
if uidStrings := req.URL.Query()["uid"]; len(uidStrings) > 0 {
uids := []types.UID{}
for _, uidString := range uidStrings {
uids = append(uids, types.UID(uidString))
}
graph = h.controller.dependencyGraphBuilder.uidToNode.ToGonumGraphForObj(uids...)
} else {
graph = h.controller.dependencyGraphBuilder.uidToNode.ToGonumGraph()
}
data, err := dot.Marshal(graph, "full", "", " ", false)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
w.Write(data)
}
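
The handler above answers only the exact path "/graph" and writes the dependency graph as Graphviz DOT, optionally restricted to the objects reachable from one or more ?uid= query parameters (via ToGonumGraphForObj). A minimal client sketch follows; the listen address and mount point are placeholders, since where kube-controller-manager actually exposes this handler is not part of this diff.

// fetchgraph.go: a sketch of a client for the debug handler above. The address
// below is a placeholder and assumes the handler is mounted so that it still
// sees the request path "/graph".
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Each command-line argument becomes a ?uid= parameter, limiting the dump
	// to the subgraphs around those objects.
	q := url.Values{}
	for _, uid := range os.Args[1:] {
		q.Add("uid", uid)
	}
	endpoint := "http://127.0.0.1:10252/graph" // placeholder host:port
	if len(q) > 0 {
		endpoint += "?" + q.Encode()
	}
	resp, err := http.Get(endpoint)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	// The body is Graphviz DOT text; copy it to stdout for rendering.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

The DOT output can then be rendered with Graphviz, e.g. go run fetchgraph.go <uid> | dot -Tsvg -o graph.svg, assuming the dot binary is installed.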

View File

@ -0,0 +1,487 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package garbagecollector
import (
"sort"
"testing"
"github.com/davecgh/go-spew/spew"
"gonum.org/v1/gonum/graph"
"gonum.org/v1/gonum/graph/simple"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
var (
alphaNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("alpha"),
},
},
owners: []metav1.OwnerReference{
{UID: types.UID("bravo")},
{UID: types.UID("charlie")},
},
}
}
bravoNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("bravo"),
},
},
dependents: map[*node]struct{}{
alphaNode(): {},
},
}
}
charlieNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("charlie"),
},
},
dependents: map[*node]struct{}{
alphaNode(): {},
},
}
}
deltaNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("delta"),
},
},
owners: []metav1.OwnerReference{
{UID: types.UID("foxtrot")},
},
}
}
echoNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("echo"),
},
},
}
}
foxtrotNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("foxtrot"),
},
},
owners: []metav1.OwnerReference{
{UID: types.UID("golf")},
},
dependents: map[*node]struct{}{
deltaNode(): {},
},
}
}
golfNode = func() *node {
return &node{
identity: objectReference{
OwnerReference: metav1.OwnerReference{
UID: types.UID("golf"),
},
},
dependents: map[*node]struct{}{
foxtrotNode(): {},
},
}
}
)
func TestToGonumGraph(t *testing.T) {
tests := []struct {
name string
uidToNode map[types.UID]*node
expect graph.Directed
}{
{
name: "simple",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
alphaVertex := NewGonumVertex(alphaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(alphaVertex)
bravoVertex := NewGonumVertex(bravoNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(bravoVertex)
charlieVertex := NewGonumVertex(charlieNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(charlieVertex)
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: bravoVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: charlieVertex,
})
return graphBuilder
}(),
},
{
name: "missing", // synthetic vertex created
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("charlie"): charlieNode(),
},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
alphaVertex := NewGonumVertex(alphaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(alphaVertex)
bravoVertex := NewGonumVertex(bravoNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(bravoVertex)
charlieVertex := NewGonumVertex(charlieNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(charlieVertex)
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: bravoVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: charlieVertex,
})
return graphBuilder
}(),
},
{
name: "drop-no-ref",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
types.UID("echo"): echoNode(),
},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
alphaVertex := NewGonumVertex(alphaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(alphaVertex)
bravoVertex := NewGonumVertex(bravoNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(bravoVertex)
charlieVertex := NewGonumVertex(charlieNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(charlieVertex)
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: bravoVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: charlieVertex,
})
return graphBuilder
}(),
},
{
name: "two-chains",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
types.UID("delta"): deltaNode(),
types.UID("foxtrot"): foxtrotNode(),
types.UID("golf"): golfNode(),
},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
alphaVertex := NewGonumVertex(alphaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(alphaVertex)
bravoVertex := NewGonumVertex(bravoNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(bravoVertex)
charlieVertex := NewGonumVertex(charlieNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(charlieVertex)
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: bravoVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: charlieVertex,
})
deltaVertex := NewGonumVertex(deltaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(deltaVertex)
foxtrotVertex := NewGonumVertex(foxtrotNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(foxtrotVertex)
golfVertex := NewGonumVertex(golfNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(golfVertex)
graphBuilder.SetEdge(simple.Edge{
F: deltaVertex,
T: foxtrotVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: foxtrotVertex,
T: golfVertex,
})
return graphBuilder
}(),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := toGonumGraph(test.uidToNode)
compareGraphs(test.expect, actual, t)
})
}
}
func TestToGonumGraphObj(t *testing.T) {
tests := []struct {
name string
uidToNode map[types.UID]*node
uids []types.UID
expect graph.Directed
}{
{
name: "simple",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
},
uids: []types.UID{types.UID("bravo")},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
alphaVertex := NewGonumVertex(alphaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(alphaVertex)
bravoVertex := NewGonumVertex(bravoNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(bravoVertex)
charlieVertex := NewGonumVertex(charlieNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(charlieVertex)
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: bravoVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: charlieVertex,
})
return graphBuilder
}(),
},
{
name: "missing", // synthetic vertex created
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("charlie"): charlieNode(),
},
uids: []types.UID{types.UID("bravo")},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
return graphBuilder
}(),
},
{
name: "drop-no-ref",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
types.UID("echo"): echoNode(),
},
uids: []types.UID{types.UID("echo")},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
return graphBuilder
}(),
},
{
name: "two-chains-from-owner",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
types.UID("delta"): deltaNode(),
types.UID("foxtrot"): foxtrotNode(),
types.UID("golf"): golfNode(),
},
uids: []types.UID{types.UID("golf")},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
deltaVertex := NewGonumVertex(deltaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(deltaVertex)
foxtrotVertex := NewGonumVertex(foxtrotNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(foxtrotVertex)
golfVertex := NewGonumVertex(golfNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(golfVertex)
graphBuilder.SetEdge(simple.Edge{
F: deltaVertex,
T: foxtrotVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: foxtrotVertex,
T: golfVertex,
})
return graphBuilder
}(),
},
{
name: "two-chains-from-child",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
types.UID("delta"): deltaNode(),
types.UID("foxtrot"): foxtrotNode(),
types.UID("golf"): golfNode(),
},
uids: []types.UID{types.UID("delta")},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
deltaVertex := NewGonumVertex(deltaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(deltaVertex)
foxtrotVertex := NewGonumVertex(foxtrotNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(foxtrotVertex)
golfVertex := NewGonumVertex(golfNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(golfVertex)
graphBuilder.SetEdge(simple.Edge{
F: deltaVertex,
T: foxtrotVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: foxtrotVertex,
T: golfVertex,
})
return graphBuilder
}(),
},
{
name: "two-chains-choose-both",
uidToNode: map[types.UID]*node{
types.UID("alpha"): alphaNode(),
types.UID("bravo"): bravoNode(),
types.UID("charlie"): charlieNode(),
types.UID("delta"): deltaNode(),
types.UID("foxtrot"): foxtrotNode(),
types.UID("golf"): golfNode(),
},
uids: []types.UID{types.UID("delta"), types.UID("charlie")},
expect: func() graph.Directed {
graphBuilder := simple.NewDirectedGraph()
alphaVertex := NewGonumVertex(alphaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(alphaVertex)
bravoVertex := NewGonumVertex(bravoNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(bravoVertex)
charlieVertex := NewGonumVertex(charlieNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(charlieVertex)
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: bravoVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: alphaVertex,
T: charlieVertex,
})
deltaVertex := NewGonumVertex(deltaNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(deltaVertex)
foxtrotVertex := NewGonumVertex(foxtrotNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(foxtrotVertex)
golfVertex := NewGonumVertex(golfNode(), graphBuilder.NewNode().ID())
graphBuilder.AddNode(golfVertex)
graphBuilder.SetEdge(simple.Edge{
F: deltaVertex,
T: foxtrotVertex,
})
graphBuilder.SetEdge(simple.Edge{
F: foxtrotVertex,
T: golfVertex,
})
return graphBuilder
}(),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := toGonumGraphForObj(test.uidToNode, test.uids...)
compareGraphs(test.expect, actual, t)
})
}
}
func compareGraphs(expected, actual graph.Directed, t *testing.T) {
// sort the nodes by UID before comparing
// (the slices we get back are from map iteration, where order is not guaranteed)
expectedNodes := expected.Nodes()
actualNodes := actual.Nodes()
sort.Sort(gonumByUID(expectedNodes))
sort.Sort(gonumByUID(actualNodes))
if len(expectedNodes) != len(actualNodes) {
t.Fatal(spew.Sdump(actual))
}
for i := range expectedNodes {
currExpected := *expectedNodes[i].(*gonumVertex)
currActual := *actualNodes[i].(*gonumVertex)
if currExpected.uid != currActual.uid {
t.Errorf("expected %v, got %v", spew.Sdump(currExpected), spew.Sdump(currActual))
}
expectedFrom := append([]graph.Node{}, expected.From(expectedNodes[i].ID())...)
actualFrom := append([]graph.Node{}, actual.From(actualNodes[i].ID())...)
sort.Sort(gonumByUID(expectedFrom))
sort.Sort(gonumByUID(actualFrom))
if len(expectedFrom) != len(actualFrom) {
t.Errorf("%q: expected %v, got %v", currExpected.uid, spew.Sdump(expectedFrom), spew.Sdump(actualFrom))
}
for i := range expectedFrom {
currExpectedFrom := *expectedFrom[i].(*gonumVertex)
currActualFrom := *actualFrom[i].(*gonumVertex)
if currExpectedFrom.uid != currActualFrom.uid {
t.Errorf("expected %v, got %v", spew.Sdump(currExpectedFrom), spew.Sdump(currActualFrom))
}
}
}
}
type gonumByUID []graph.Node
func (s gonumByUID) Len() int { return len(s) }
func (s gonumByUID) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s gonumByUID) Less(i, j int) bool {
lhs := s[i].(*gonumVertex)
lhsUID := string(lhs.uid)
rhs := s[j].(*gonumVertex)
rhsUID := string(rhs.uid)
return lhsUID < rhsUID
}

View File

@ -22,7 +22,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@ -130,8 +130,8 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
defer gc.attemptToOrphan.ShutDown()
defer gc.dependencyGraphBuilder.graphChanges.ShutDown()
glog.Infof("Starting garbage collector controller")
defer glog.Infof("Shutting down garbage collector controller")
klog.Infof("Starting garbage collector controller")
defer klog.Infof("Shutting down garbage collector controller")
go gc.dependencyGraphBuilder.Run(stopCh)
@ -139,7 +139,7 @@ func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
return
}
glog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")
klog.Infof("Garbage collector: all resource monitors have synced. Proceeding to collect garbage")
// gc workers
for i := 0; i < workers; i++ {
@ -172,13 +172,13 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
// This can occur if there is an internal error in GetDeletableResources.
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
klog.V(2).Infof("no resources reported by discovery, skipping garbage collector sync")
return
}
// Decide whether discovery has reported a change.
if reflect.DeepEqual(oldResources, newResources) {
glog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
klog.V(5).Infof("no resource updates from discovery, skipping garbage collector sync")
return
}
@ -196,18 +196,18 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
if attempt > 1 {
newResources = GetDeletableResources(discoveryClient)
if len(newResources) == 0 {
glog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
klog.V(2).Infof("no resources reported by discovery (attempt %d)", attempt)
return false, nil
}
}
glog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
klog.V(2).Infof("syncing garbage collector with updated resources from discovery (attempt %d): %s", attempt, printDiff(oldResources, newResources))
// Resetting the REST mapper will also invalidate the underlying discovery
// client. This is a leaky abstraction and assumes behavior about the REST
// mapper, but we'll deal with it for now.
gc.restMapper.Reset()
glog.V(4).Infof("reset restmapper")
klog.V(4).Infof("reset restmapper")
// Perform the monitor resync and wait for controllers to report cache sync.
//
@ -222,7 +222,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
return false, nil
}
glog.V(4).Infof("resynced monitors")
klog.V(4).Infof("resynced monitors")
// wait for caches to fill for a while (our sync period) before attempting to rediscover resources and retry syncing.
// this protects us from deadlocks where available resources changed and one of our informer caches will never fill.
@ -242,7 +242,7 @@ func (gc *GarbageCollector) Sync(discoveryClient discovery.ServerResourcesInterf
// have succeeded to ensure we'll retry on subsequent syncs if an error
// occurred.
oldResources = newResources
glog.V(2).Infof("synced garbage collector")
klog.V(2).Infof("synced garbage collector")
}, period, stopCh)
}
@ -308,7 +308,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
// have a way to distinguish this from a valid type we will recognize
// after the next discovery sync.
// For now, record the error and retry.
glog.V(5).Infof("error syncing item %s: %v", n, err)
klog.V(5).Infof("error syncing item %s: %v", n, err)
} else {
utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
}
@ -318,7 +318,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
// requeue if item hasn't been observed via an informer event yet.
// otherwise a virtual node for an item added AND removed during watch reestablishment can get stuck in the graph and never removed.
// see https://issue.k8s.io/56121
glog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
klog.V(5).Infof("item %s hasn't been observed via informer yet", n.identity)
gc.attemptToDelete.AddRateLimited(item)
}
return true
@ -330,7 +330,7 @@ func (gc *GarbageCollector) attemptToDeleteWorker() bool {
func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *node) (
dangling bool, owner *unstructured.Unstructured, err error) {
if gc.absentOwnerCache.Has(reference.UID) {
glog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
klog.V(5).Infof("according to the absentOwnerCache, object %s's owner %s/%s, %s does not exist", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
}
// TODO: we need to verify the reference resource is supported by the
@ -351,14 +351,14 @@ func (gc *GarbageCollector) isDangling(reference metav1.OwnerReference, item *no
switch {
case errors.IsNotFound(err):
gc.absentOwnerCache.Add(reference.UID)
glog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
klog.V(5).Infof("object %s's owner %s/%s, %s is not found", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
return true, nil, nil
case err != nil:
return false, nil, err
}
if owner.GetUID() != reference.UID {
glog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
klog.V(5).Infof("object %s's owner %s/%s, %s is not found, UID mismatch", item.identity.UID, reference.APIVersion, reference.Kind, reference.Name)
gc.absentOwnerCache.Add(reference.UID)
return true, nil, nil
}
@ -405,10 +405,10 @@ func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
}
func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
glog.V(2).Infof("processing item %s", item.identity)
klog.V(2).Infof("processing item %s", item.identity)
// "being deleted" is an one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
if item.isBeingDeleted() && !item.isDeletingDependents() {
glog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
return nil
}
// TODO: It's only necessary to talk to the API server if this is a
@ -420,7 +420,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// the GraphBuilder can add "virtual" node for an owner that doesn't
// exist yet, so we need to enqueue a virtual Delete event to remove
// the virtual node from GraphBuilder.uidToNode.
glog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
klog.V(5).Infof("item %v not found, generating a virtual delete event", item.identity)
gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
// since we're manually inserting a delete event to remove this node,
// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
@ -431,7 +431,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
}
if latest.GetUID() != item.identity.UID {
glog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
klog.V(5).Infof("UID doesn't match, item %v not found, generating a virtual delete event", item.identity)
gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
// since we're manually inserting a delete event to remove this node,
// we don't need to keep tracking it as a virtual node and requeueing in attemptToDelete
@ -448,7 +448,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// compute if we should delete the item
ownerReferences := latest.GetOwnerReferences()
if len(ownerReferences) == 0 {
glog.V(2).Infof("object %s's doesn't have an owner, continue on next item", item.identity)
klog.V(2).Infof("object %s's doesn't have an owner, continue on next item", item.identity)
return nil
}
@ -456,15 +456,15 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
if err != nil {
return err
}
glog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)
klog.V(5).Infof("classify references of %s.\nsolid: %#v\ndangling: %#v\nwaitingForDependentsDeletion: %#v\n", item.identity, solid, dangling, waitingForDependentsDeletion)
switch {
case len(solid) != 0:
glog.V(2).Infof("object %s has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
klog.V(2).Infof("object %#v has at least one existing owner: %#v, will not garbage collect", solid, item.identity)
if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
return nil
}
glog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
klog.V(2).Infof("remove dangling references %#v and waiting references %#v for object %s", dangling, waitingForDependentsDeletion, item.identity)
// waitingForDependentsDeletion needs to be deleted from the
// ownerReferences, otherwise the referenced objects will be stuck with
// the FinalizerDeletingDependents and never get deleted.
@ -483,7 +483,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// problem.
// multiple workers run attemptToDeleteItem in parallel, so the
// cycle detection can fail in a race condition.
glog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
klog.V(2).Infof("processing object %s, some of its owners and its dependent [%s] have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the object is going to be deleted with Foreground", item.identity, dep.identity)
patch, err := item.unblockOwnerReferencesStrategicMergePatch()
if err != nil {
return err
@ -494,7 +494,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
break
}
}
glog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
klog.V(2).Infof("at least one owner of object %s has FinalizerDeletingDependents, and the object itself has dependents, so it is going to be deleted in Foreground", item.identity)
// the deletion event will be observed by the graphBuilder, so the item
// will be processed again in processDeletingDependentsItem. If it
// doesn't have dependents, the function will remove the
@ -518,7 +518,7 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// otherwise, default to background.
policy = metav1.DeletePropagationBackground
}
glog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
klog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
return gc.deleteObject(item.identity, &policy)
}
}
@ -527,12 +527,12 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
func (gc *GarbageCollector) processDeletingDependentsItem(item *node) error {
blockingDependents := item.blockingDependents()
if len(blockingDependents) == 0 {
glog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
klog.V(2).Infof("remove DeleteDependents finalizer for item %s", item.identity)
return gc.removeFinalizer(item, metav1.FinalizerDeleteDependents)
}
for _, dep := range blockingDependents {
if !dep.isDeletingDependents() {
glog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
klog.V(2).Infof("adding %s to attemptToDelete, because its owner %s is deletingDependents", dep.identity, item.identity)
gc.attemptToDelete.Add(dep)
}
}
@ -570,7 +570,7 @@ func (gc *GarbageCollector) orphanDependents(owner objectReference, dependents [
if len(errorsSlice) != 0 {
return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
}
glog.V(5).Infof("successfully updated all dependents of owner %s", owner)
klog.V(5).Infof("successfully updated all dependents of owner %s", owner)
return nil
}
@ -644,9 +644,9 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
preferredResources, err := discoveryClient.ServerPreferredResources()
if err != nil {
if discovery.IsGroupDiscoveryFailedError(err) {
glog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
klog.Warningf("failed to discover some groups: %v", err.(*discovery.ErrGroupDiscoveryFailed).Groups)
} else {
glog.Warningf("failed to discover preferred resources: %v", err)
klog.Warningf("failed to discover preferred resources: %v", err)
}
}
if preferredResources == nil {
@ -660,7 +660,7 @@ func GetDeletableResources(discoveryClient discovery.ServerResourcesInterface) m
for _, rl := range deletableResources {
gv, err := schema.ParseGroupVersion(rl.GroupVersion)
if err != nil {
glog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
klog.Warningf("ignoring invalid discovered resource %q: %v", rl.GroupVersion, err)
continue
}
for i := range rl.APIResources {
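
The changes in this file are a mechanical migration from github.com/golang/glog to k8s.io/klog: the import is swapped while every call site keeps the same verbosity-level, printf-style API. A minimal sketch of the pattern is below; the flag wiring is illustrative only, since how the surrounding binary registers klog's flags (and which klog revision is vendored) is not shown in this diff.

// A sketch of the glog -> klog migration pattern used throughout this commit.
package main

import (
	"flag"

	"k8s.io/klog" // previously: "github.com/golang/glog"
)

func main() {
	// Unlike glog, klog does not register its flags in an init(); callers wire
	// them into a FlagSet explicitly (nil means flag.CommandLine).
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// Call sites are untouched by the rename: same verbosity levels, same API.
	klog.Infof("Starting garbage collector controller")
	klog.V(2).Infof("synced garbage collector")
}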

View File

@ -22,7 +22,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -176,16 +176,16 @@ func (gb *GraphBuilder) controllerFor(resource schema.GroupVersionResource, kind
}
shared, err := gb.sharedInformers.ForResource(resource)
if err == nil {
glog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
klog.V(4).Infof("using a shared informer for resource %q, kind %q", resource.String(), kind.String())
// need to clone because it's from a shared cache
shared.Informer().AddEventHandlerWithResyncPeriod(handlers, ResourceResyncTime)
return shared.Informer().GetController(), shared.Informer().GetStore(), nil
} else {
glog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
klog.V(4).Infof("unable to use a shared informer for resource %q, kind %q: %v", resource.String(), kind.String(), err)
}
// TODO: consider store in one storage.
glog.V(5).Infof("create storage for resource %s", resource)
klog.V(5).Infof("create storage for resource %s", resource)
store, monitor := cache.NewInformer(
listWatcher(gb.dynamicClient, resource),
nil,
@ -245,7 +245,7 @@ func (gb *GraphBuilder) syncMonitors(resources map[schema.GroupVersionResource]s
}
}
glog.V(4).Infof("synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
klog.V(4).Infof("synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
// NewAggregate returns nil if errs is 0-length
return utilerrors.NewAggregate(errs)
}
@ -277,7 +277,7 @@ func (gb *GraphBuilder) startMonitors() {
started++
}
}
glog.V(4).Infof("started %d new monitors, %d currently running", started, len(monitors))
klog.V(4).Infof("started %d new monitors, %d currently running", started, len(monitors))
}
// IsSynced returns true if any monitors exist AND all those monitors'
@ -289,13 +289,13 @@ func (gb *GraphBuilder) IsSynced() bool {
defer gb.monitorLock.Unlock()
if len(gb.monitors) == 0 {
glog.V(4).Info("garbage controller monitor not synced: no monitors")
klog.V(4).Info("garbage controller monitor not synced: no monitors")
return false
}
for resource, monitor := range gb.monitors {
if !monitor.controller.HasSynced() {
glog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
klog.V(4).Infof("garbage controller monitor not yet synced: %+v", resource)
return false
}
}
@ -305,8 +305,8 @@ func (gb *GraphBuilder) IsSynced() bool {
// Run sets the stop channel and starts monitor execution until stopCh is
// closed. Any running monitors will be stopped before Run returns.
func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
glog.Infof("GraphBuilder running")
defer glog.Infof("GraphBuilder stopping")
klog.Infof("GraphBuilder running")
defer klog.Infof("GraphBuilder stopping")
// Set up the stop channel.
gb.monitorLock.Lock()
@ -333,19 +333,11 @@ func (gb *GraphBuilder) Run(stopCh <-chan struct{}) {
// reset monitors so that the graph builder can be safely re-run/synced.
gb.monitors = nil
glog.Infof("stopped %d of %d monitors", stopped, len(monitors))
klog.Infof("stopped %d of %d monitors", stopped, len(monitors))
}
var ignoredResources = map[schema.GroupResource]struct{}{
{Group: "extensions", Resource: "replicationcontrollers"}: {},
{Group: "", Resource: "bindings"}: {},
{Group: "", Resource: "componentstatuses"}: {},
{Group: "", Resource: "events"}: {},
{Group: "authentication.k8s.io", Resource: "tokenreviews"}: {},
{Group: "authorization.k8s.io", Resource: "subjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "selfsubjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "localsubjectaccessreviews"}: {},
{Group: "authorization.k8s.io", Resource: "selfsubjectrulesreviews"}: {},
{Group: "", Resource: "events"}: {},
}
// DefaultIgnoredResources returns the default set of resources that the garbage collector controller
@ -385,7 +377,7 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer
dependents: make(map[*node]struct{}),
virtual: true,
}
glog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
klog.V(5).Infof("add virtual node.identity: %s\n\n", ownerNode.identity)
gb.uidToNode.Write(ownerNode)
}
ownerNode.addDependent(n)
@ -523,7 +515,7 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe
if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion {
node, found := gb.uidToNode.Read(ref.UID)
if !found {
glog.V(5).Infof("cannot find %s in uidToNode", ref.UID)
klog.V(5).Infof("cannot find %s in uidToNode", ref.UID)
continue
}
gb.attemptToDelete.Add(node)
@ -535,7 +527,7 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe
if wasBlocked && isUnblocked {
node, found := gb.uidToNode.Read(c.newRef.UID)
if !found {
glog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID)
klog.V(5).Infof("cannot find %s in uidToNode", c.newRef.UID)
continue
}
gb.attemptToDelete.Add(node)
@ -545,12 +537,12 @@ func (gb *GraphBuilder) addUnblockedOwnersToDeleteQueue(removed []metav1.OwnerRe
func (gb *GraphBuilder) processTransitions(oldObj interface{}, newAccessor metav1.Object, n *node) {
if startsWaitingForDependentsOrphaned(oldObj, newAccessor) {
glog.V(5).Infof("add %s to the attemptToOrphan", n.identity)
klog.V(5).Infof("add %s to the attemptToOrphan", n.identity)
gb.attemptToOrphan.Add(n)
return
}
if startsWaitingForDependentsDeleted(oldObj, newAccessor) {
glog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity)
klog.V(2).Infof("add %s to the attemptToDelete, because it's waiting for its dependents to be deleted", n.identity)
// if the n is added as a "virtual" node, its deletingDependents field is not properly set, so always set it here.
n.markDeletingDependents()
for dep := range n.dependents {
@ -583,8 +575,8 @@ func (gb *GraphBuilder) processGraphChanges() bool {
utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
return true
}
glog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
// Check if the node already exsits
klog.V(5).Infof("GraphBuilder process object: %s/%s, namespace %s, name %s, uid %s, event type %v", event.gvk.GroupVersion().String(), event.gvk.Kind, accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
// Check if the node already exists
existingNode, found := gb.uidToNode.Read(accessor.GetUID())
if found {
// this marks the node as having been observed via an informer event
@ -635,7 +627,7 @@ func (gb *GraphBuilder) processGraphChanges() bool {
gb.processTransitions(event.oldObj, accessor, existingNode)
case event.eventType == deleteEvent:
if !found {
glog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
klog.V(5).Infof("%v doesn't exist in the graph, this shouldn't happen", accessor.GetUID())
return true
}
// removeNode updates the graph
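
Earlier in this file the ignoredResources map is trimmed down to just core "events"; DefaultIgnoredResources (whose doc comment appears above) exposes that set to callers. The sketch below shows roughly how a consumer might filter discovered resources through it; the return type of DefaultIgnoredResources is assumed from the surrounding map definition, and the real kube-controller-manager wiring is not part of this diff.

package gcexample

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/controller/garbagecollector"
)

// filterIgnored drops the resources the garbage collector ignores by default.
func filterIgnored(deletable map[schema.GroupVersionResource]struct{}) map[schema.GroupVersionResource]struct{} {
	ignored := garbagecollector.DefaultIgnoredResources()
	out := map[schema.GroupVersionResource]struct{}{}
	for gvr := range deletable {
		if _, skip := ignored[gvr.GroupResource()]; skip {
			continue
		}
		out[gvr] = struct{}{}
	}
	return out
}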

View File

@ -13,8 +13,8 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly",
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

View File

@ -19,7 +19,7 @@ package garbagecollector
import (
"fmt"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@ -73,7 +73,7 @@ func (gc *GarbageCollector) updateObject(item objectReference, obj *unstructured
if err != nil {
return nil, err
}
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Update(obj)
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Update(obj, metav1.UpdateOptions{})
}
func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt types.PatchType) (*unstructured.Unstructured, error) {
@ -81,7 +81,7 @@ func (gc *GarbageCollector) patchObject(item objectReference, patch []byte, pt t
if err != nil {
return nil, err
}
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch)
return gc.dynamicClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.Namespace)).Patch(item.Name, pt, patch, metav1.UpdateOptions{})
}
// TODO: Using Patch when strategicmerge supports deleting an entry from a
@ -110,7 +110,7 @@ func (gc *GarbageCollector) removeFinalizer(owner *node, targetFinalizer string)
newFinalizers = append(newFinalizers, f)
}
if !found {
glog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity)
klog.V(5).Infof("the %s finalizer is already removed from object %s", targetFinalizer, owner.identity)
return nil
}
// remove the owner from dependent's OwnerReferences
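
These hunks track the vendored client-go update: the dynamic client's Update and Patch methods now take an explicit options argument (metav1.UpdateOptions in this revision). A compile-oriented sketch of the new call shapes follows; the GroupVersionResource and object are placeholders, and later client-go releases change the signatures again (adding a context.Context parameter and a dedicated metav1.PatchOptions), so this matches only the revision vendored by this commit.

package gcexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// updateAndPatch shows the call shapes this commit moves to.
func updateAndPatch(client dynamic.Interface, obj *unstructured.Unstructured, patch []byte) error {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} // placeholder

	// Update now takes an explicit options struct.
	if _, err := client.Resource(gvr).Namespace(obj.GetNamespace()).Update(obj, metav1.UpdateOptions{}); err != nil {
		return err
	}

	// Patch takes the patch type, the payload, and (in this client-go revision) UpdateOptions.
	_, err := client.Resource(gvr).Namespace(obj.GetNamespace()).Patch(obj.GetName(), types.MergePatchType, patch, metav1.UpdateOptions{})
	return err
}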