vendor update for CSI 0.3.0

This commit is contained in:
gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions

View File

@ -2,9 +2,11 @@ approvers:
- erictune
- liggitt
- deads2k
- mikedanese
reviewers:
- erictune
- liggitt
- deads2k
- ericchiang
- enj
- mikedanese

View File

@ -152,13 +152,12 @@ func (t *TokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, e
// Copied from k8s.io/client-go/tools/bootstrap/token/api
func getSecretString(secret *api.Secret, key string) string {
- if secret.Data == nil {
- return ""
- }
- if val, ok := secret.Data[key]; ok {
- return string(val)
- }
- return ""
data, ok := secret.Data[key]
if !ok {
return ""
}
return string(data)
}
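The simplification above works because indexing a nil map is safe in Go and simply misses. A standalone sketch of the same lookup pattern, with a stubbed Secret type in place of the real API type:

package main

import "fmt"

// secret stubs the Data field of the API Secret type, for illustration only.
type secret struct {
	Data map[string][]byte
}

// getSecretString mirrors the simplified helper: no explicit nil check is
// needed, because a lookup on a nil map safely returns the zero value.
func getSecretString(s *secret, key string) string {
	data, ok := s.Data[key]
	if !ok {
		return ""
	}
	return string(data)
}

func main() {
	s := &secret{Data: map[string][]byte{"expiration": []byte("2018-07-18T16:47:22Z")}}
	fmt.Println(getSecretString(s, "expiration"))         // 2018-07-18T16:47:22Z
	fmt.Println(getSecretString(&secret{}, "expiration")) // "" (nil map lookup is safe)
}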
// Copied from k8s.io/client-go/tools/bootstrap/token/api
@ -167,11 +166,13 @@ func isSecretExpired(secret *api.Secret) bool {
if len(expiration) > 0 {
expTime, err2 := time.Parse(time.RFC3339, expiration)
if err2 != nil {
- tokenErrorf(secret, "has unparsable expiration time (%s). Treating as expired.", expiration)
glog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.",
expiration, secret.Namespace, secret.Name, err2)
return true
}
if time.Now().After(expTime) {
- tokenErrorf(secret, "has expired.", expiration)
glog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v",
secret.Namespace, secret.Name, expiration)
return true
}
}
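A minimal standalone sketch of the expiry logic above; the timestamp value is illustrative:

package main

import (
	"fmt"
	"time"
)

func main() {
	expiration := "2018-07-18T16:47:22Z" // sample RFC3339 value from a bootstrap Secret
	expTime, err := time.Parse(time.RFC3339, expiration)
	if err != nil {
		fmt.Println("unparseable expiration, treating as expired")
		return
	}
	// a token is treated as expired once its expiration time has passed
	fmt.Println("expired:", time.Now().After(expTime))
}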
@ -181,8 +182,10 @@ func isSecretExpired(secret *api.Secret) bool {
// Copied from kubernetes/cmd/kubeadm/app/util/token
var (
// tokenRegexpString defines id.secret regular expression pattern
tokenRegexpString = "^([a-z0-9]{6})\\.([a-z0-9]{16})$"
- tokenRegexp = regexp.MustCompile(tokenRegexpString)
// tokenRegexp is a compiled regular expression of TokenRegexpString
tokenRegexp = regexp.MustCompile(tokenRegexpString)
)
// parseToken tries to parse a valid token from a string.
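A standalone sketch of what the id.secret pattern accepts; the token value is made up:

package main

import (
	"fmt"
	"regexp"
)

// same pattern as above: a 6-character id and a 16-character secret,
// both lowercase alphanumeric, joined by a dot
var tokenRegexp = regexp.MustCompile(`^([a-z0-9]{6})\.([a-z0-9]{16})$`)

func main() {
	parts := tokenRegexp.FindStringSubmatch("abcdef.0123456789abcdef")
	if parts == nil {
		fmt.Println("not a valid bootstrap token")
		return
	}
	fmt.Printf("id=%s secret=%s\n", parts[1], parts[2])
}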

View File

@ -8,15 +8,21 @@ load(
go_test(
name = "go_default_test",
srcs = ["node_authorizer_test.go"],
srcs = [
"graph_test.go",
"intset_test.go",
"node_authorizer_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/features:go_default_library",
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
@ -28,6 +34,7 @@ go_library(
srcs = [
"graph.go",
"graph_populator.go",
"intset.go",
"node_authorizer.go",
],
importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node",
@ -35,7 +42,6 @@ go_library(
"//pkg/api/persistentvolume:go_default_library",
"//pkg/api/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
@ -45,6 +51,7 @@ go_library(
"//third_party/forked/gonum/graph/simple:go_default_library",
"//third_party/forked/gonum/graph/traverse:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",

View File

@ -80,6 +80,11 @@ type Graph struct {
graph *simple.DirectedAcyclicGraph
// vertices is a map of type -> namespace -> name -> vertex
vertices map[vertexType]namespaceVertexMapping
// destinationEdgeIndex is a map of vertex -> set of destination IDs
destinationEdgeIndex map[int]*intSet
// destinationEdgeThreshold is the minimum number of distinct destination IDs at which to maintain an index
destinationEdgeThreshold int
}
// namespaceVertexMapping is a map of namespace -> name -> vertex
@ -92,6 +97,11 @@ func NewGraph() *Graph {
return &Graph{
vertices: map[vertexType]namespaceVertexMapping{},
graph: simple.NewDirectedAcyclicGraph(0, 0),
destinationEdgeIndex: map[int]*intSet{},
// experimentally determined to be the point at which iteration adds an order of magnitude to the authz check.
// since maintaining indexes costs time/memory while processing graph changes, we don't want to make this too low.
destinationEdgeThreshold: 200,
}
}
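As a rough standalone sketch of what the index buys (plain types and made-up numbers, not the real graph structures): once a vertex has many destination edges, a set lookup on node IDs replaces a linear walk over those edges.

package main

import "fmt"

func main() {
	// stand-in for destinationEdgeIndex[vertexID]: the set of node IDs
	// reachable from one heavily shared vertex via destination edges
	index := map[int]bool{}
	for nodeID := 0; nodeID < 500; nodeID++ {
		index[nodeID] = true
	}

	// authorization fast path: one map lookup instead of visiting 500 edges
	fmt.Println(index[42])  // true: this node is a known destination
	fmt.Println(index[999]) // false: fall back to the full graph traversal
}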
@ -165,6 +175,7 @@ func (g *Graph) deleteVertex_locked(vertexType vertexType, namespace, name strin
// find existing neighbors with a single edge (meaning we are their only neighbor)
neighborsToRemove := []graph.Node{}
neighborsToRecompute := []graph.Node{}
g.graph.VisitFrom(vertex, func(neighbor graph.Node) bool {
// this downstream neighbor has only one edge (which must be from us), so remove them as well
if g.graph.Degree(neighbor) == 1 {
@ -173,28 +184,115 @@ func (g *Graph) deleteVertex_locked(vertexType vertexType, namespace, name strin
return true
})
g.graph.VisitTo(vertex, func(neighbor graph.Node) bool {
- // this upstream neighbor has only one edge (which must be to us), so remove them as well
if g.graph.Degree(neighbor) == 1 {
// this upstream neighbor has only one edge (which must be to us), so remove them as well
neighborsToRemove = append(neighborsToRemove, neighbor)
} else {
// recompute the destination edge index on this neighbor
neighborsToRecompute = append(neighborsToRecompute, neighbor)
}
return true
})
// remove the vertex
- g.graph.RemoveNode(vertex)
- delete(g.vertices[vertexType][namespace], name)
- if len(g.vertices[vertexType][namespace]) == 0 {
- delete(g.vertices[vertexType], namespace)
- }
g.removeVertex_locked(vertex)
// remove neighbors that are now edgeless
for _, neighbor := range neighborsToRemove {
- g.graph.RemoveNode(neighbor)
- n := neighbor.(*namedVertex)
- delete(g.vertices[n.vertexType][n.namespace], n.name)
- if len(g.vertices[n.vertexType][n.namespace]) == 0 {
- delete(g.vertices[n.vertexType], n.namespace)
- }
g.removeVertex_locked(neighbor.(*namedVertex))
}
// recompute destination indexes for neighbors that dropped outbound edges
for _, neighbor := range neighborsToRecompute {
g.recomputeDestinationIndex_locked(neighbor)
}
}
// must be called under write lock
// deletes edges from a given vertex type to a specific vertex
// will delete each orphaned "from" vertex, but will never delete the "to" vertex
func (g *Graph) deleteEdges_locked(fromType, toType vertexType, toNamespace, toName string) {
// get the "to" side
toVert, exists := g.getVertex_rlocked(toType, toNamespace, toName)
if !exists {
return
}
// delete all edges between vertices of fromType and toVert
neighborsToRemove := []*namedVertex{}
neighborsToRecompute := []*namedVertex{}
g.graph.VisitTo(toVert, func(from graph.Node) bool {
fromVert := from.(*namedVertex)
if fromVert.vertexType != fromType {
return true
}
// remove the edge
g.graph.RemoveEdge(simple.Edge{F: fromVert, T: toVert})
// track vertexes that changed edges
if g.graph.Degree(fromVert) == 0 {
neighborsToRemove = append(neighborsToRemove, fromVert)
} else {
neighborsToRecompute = append(neighborsToRecompute, fromVert)
}
return true
})
// clean up orphaned verts
for _, v := range neighborsToRemove {
g.removeVertex_locked(v)
}
// recompute destination indexes for neighbors that dropped outbound edges
for _, v := range neighborsToRecompute {
g.recomputeDestinationIndex_locked(v)
}
}
// must be called under write lock
// removeVertex_locked removes the specified vertex from the graph and from the maintained indices.
// It does nothing to indexes of neighbor vertices.
func (g *Graph) removeVertex_locked(v *namedVertex) {
g.graph.RemoveNode(v)
delete(g.destinationEdgeIndex, v.ID())
delete(g.vertices[v.vertexType][v.namespace], v.name)
if len(g.vertices[v.vertexType][v.namespace]) == 0 {
delete(g.vertices[v.vertexType], v.namespace)
}
}
// must be called under write lock
// recomputeDestinationIndex_locked recomputes the index of destination ids for the specified vertex
func (g *Graph) recomputeDestinationIndex_locked(n graph.Node) {
// don't maintain indices for nodes with few edges
edgeCount := g.graph.Degree(n)
if edgeCount < g.destinationEdgeThreshold {
delete(g.destinationEdgeIndex, n.ID())
return
}
// get or create the index
index := g.destinationEdgeIndex[n.ID()]
if index == nil {
index = newIntSet()
} else {
index.startNewGeneration()
}
// populate the index
g.graph.VisitFrom(n, func(dest graph.Node) bool {
if destinationEdge, ok := g.graph.EdgeBetween(n, dest).(*destinationEdge); ok {
index.mark(destinationEdge.DestinationID())
}
return true
})
// remove existing items no longer in the list
index.sweep()
if len(index.members) < g.destinationEdgeThreshold {
delete(g.destinationEdgeIndex, n.ID())
} else {
g.destinationEdgeIndex[n.ID()] = index
}
}
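startNewGeneration, mark, and sweep come from the intSet type added later in this commit; a minimal standalone illustration of the same mark-and-sweep idea, with a plain map standing in for intSet:

package main

import "fmt"

func main() {
	gen := byte(0)
	members := map[int]byte{} // destination ID -> generation that last marked it

	// generation 1: destinations 1 and 2 are reachable
	gen++
	members[1], members[2] = gen, gen

	// generation 2: 2 stays, 3 appears, 1 silently disappears
	gen++
	members[2], members[3] = gen, gen

	// sweep: drop IDs that were not re-marked in the current generation
	for id, g := range members {
		if g != gen {
			delete(members, id)
		}
	}
	fmt.Println(members) // map[2:2 3:2]
}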
@ -221,22 +319,30 @@ func (g *Graph) AddPod(pod *api.Pod) {
//
// ref https://github.com/kubernetes/kubernetes/issues/58790
if len(pod.Spec.ServiceAccountName) > 0 {
- g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(serviceAccountVertexType, pod.Namespace, pod.Spec.ServiceAccountName), podVertex, nodeVertex))
serviceAccountVertex := g.getOrCreateVertex_locked(serviceAccountVertexType, pod.Namespace, pod.Spec.ServiceAccountName)
g.graph.SetEdge(newDestinationEdge(serviceAccountVertex, podVertex, nodeVertex))
g.recomputeDestinationIndex_locked(serviceAccountVertex)
}
podutil.VisitPodSecretNames(pod, func(secret string) bool {
- g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(secretVertexType, pod.Namespace, secret), podVertex, nodeVertex))
secretVertex := g.getOrCreateVertex_locked(secretVertexType, pod.Namespace, secret)
g.graph.SetEdge(newDestinationEdge(secretVertex, podVertex, nodeVertex))
g.recomputeDestinationIndex_locked(secretVertex)
return true
})
podutil.VisitPodConfigmapNames(pod, func(configmap string) bool {
- g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(configMapVertexType, pod.Namespace, configmap), podVertex, nodeVertex))
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, pod.Namespace, configmap)
g.graph.SetEdge(newDestinationEdge(configmapVertex, podVertex, nodeVertex))
g.recomputeDestinationIndex_locked(configmapVertex)
return true
})
for _, v := range pod.Spec.Volumes {
if v.PersistentVolumeClaim != nil {
- g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, v.PersistentVolumeClaim.ClaimName), podVertex, nodeVertex))
pvcVertex := g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, v.PersistentVolumeClaim.ClaimName)
g.graph.SetEdge(newDestinationEdge(pvcVertex, podVertex, nodeVertex))
g.recomputeDestinationIndex_locked(pvcVertex)
}
}
}
@ -301,3 +407,25 @@ func (g *Graph) DeleteVolumeAttachment(name string) {
defer g.lock.Unlock()
g.deleteVertex_locked(vaVertexType, "", name)
}
// SetNodeConfigMap sets up edges for the Node.Spec.ConfigSource.ConfigMap relationship:
//
// configmap -> node
func (g *Graph) SetNodeConfigMap(nodeName, configMapName, configMapNamespace string) {
g.lock.Lock()
defer g.lock.Unlock()
// TODO(mtaufen): ensure len(nodeName) > 0 in all cases (would sure be nice to have a dependently-typed language here...)
// clear edges configmaps -> node where the destination is the current node *only*
// at present, a node can only have one *direct* configmap reference at a time
g.deleteEdges_locked(configMapVertexType, nodeVertexType, "", nodeName)
// establish new edges if we have a real ConfigMap to reference
if len(configMapName) > 0 && len(configMapNamespace) > 0 {
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, configMapNamespace, configMapName)
nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", nodeName)
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex, nodeVertex))
}
}
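A short sketch of the intended replace semantics in this package's terms (the graph value and names are illustrative):

g := NewGraph()
// establish configmap -> node1: ns1/cm-a is node1's config source
g.SetNodeConfigMap("node1", "cm-a", "ns1")
// re-pointing node1 first clears the old configmap -> node1 edge,
// since a node has at most one direct configmap reference at a time
g.SetNodeConfigMap("node1", "cm-b", "ns1")
// clearing the reference deletes all configmap -> node1 edges
g.SetNodeConfigMap("node1", "", "")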

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"fmt"
"github.com/golang/glog"
storagev1beta1 "k8s.io/api/storage/v1beta1"
@ -34,6 +35,7 @@ type graphPopulator struct {
func AddGraphEventHandlers(
graph *Graph,
nodes coreinformers.NodeInformer,
pods coreinformers.PodInformer,
pvs coreinformers.PersistentVolumeInformer,
attachments storageinformers.VolumeAttachmentInformer,
@ -42,6 +44,14 @@ func AddGraphEventHandlers(
graph: graph,
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: g.addNode,
UpdateFunc: g.updateNode,
DeleteFunc: g.deleteNode,
})
}
pods.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: g.addPod,
UpdateFunc: g.updatePod,
@ -63,6 +73,62 @@ func AddGraphEventHandlers(
}
}
func (g *graphPopulator) addNode(obj interface{}) {
g.updateNode(nil, obj)
}
func (g *graphPopulator) updateNode(oldObj, obj interface{}) {
node := obj.(*api.Node)
var oldNode *api.Node
if oldObj != nil {
oldNode = oldObj.(*api.Node)
}
// we only set up rules for ConfigMap today, because that is the only reference type
var name, namespace string
if source := node.Spec.ConfigSource; source != nil && source.ConfigMap != nil {
name = source.ConfigMap.Name
namespace = source.ConfigMap.Namespace
}
var oldName, oldNamespace string
if oldNode != nil {
if oldSource := oldNode.Spec.ConfigSource; oldSource != nil && oldSource.ConfigMap != nil {
oldName = oldSource.ConfigMap.Name
oldNamespace = oldSource.ConfigMap.Namespace
}
}
// if Node.Spec.ConfigSource wasn't updated, nothing for us to do
if name == oldName && namespace == oldNamespace {
return
}
path := "nil"
if node.Spec.ConfigSource != nil {
path = fmt.Sprintf("%s/%s", namespace, name)
}
glog.V(4).Infof("updateNode configSource reference to %s for node %s", path, node.Name)
g.graph.SetNodeConfigMap(node.Name, name, namespace)
}
func (g *graphPopulator) deleteNode(obj interface{}) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
node, ok := obj.(*api.Node)
if !ok {
glog.Infof("unexpected type %T", obj)
return
}
// NOTE: We don't remove the node, because if the node is re-created not all pod -> node
// links are re-established (we don't get relevant events because no mutations need
// to happen in the API; the state is already there).
g.graph.SetNodeConfigMap(node.Name, "", "")
}
func (g *graphPopulator) addPod(obj interface{}) {
g.updatePod(nil, obj)
}

View File

@ -0,0 +1,178 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
)
func TestDeleteEdges_locked(t *testing.T) {
cases := []struct {
desc string
fromType vertexType
toType vertexType
toNamespace string
toName string
start *Graph
expect *Graph
}{
{
// single edge from a configmap to a node, will delete edge and orphaned configmap
desc: "edges and source orphans are deleted, destination orphans are preserved",
fromType: configMapVertexType,
toType: nodeVertexType,
toNamespace: "",
toName: "node1",
start: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap2")
nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex, nodeVertex))
return g
}(),
expect: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap2")
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
return g
}(),
},
{
// two edges from the same configmap to distinct nodes, will delete one of the edges
desc: "edges are deleted, non-orphans and destination orphans are preserved",
fromType: configMapVertexType,
toType: nodeVertexType,
toNamespace: "",
toName: "node2",
start: func() *Graph {
g := NewGraph()
nodeVertex1 := g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
nodeVertex2 := g.getOrCreateVertex_locked(nodeVertexType, "", "node2")
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex1, nodeVertex1))
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex2, nodeVertex2))
return g
}(),
expect: func() *Graph {
g := NewGraph()
nodeVertex1 := g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
g.getOrCreateVertex_locked(nodeVertexType, "", "node2")
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex1, nodeVertex1))
return g
}(),
},
{
desc: "no edges to delete",
fromType: configMapVertexType,
toType: nodeVertexType,
toNamespace: "",
toName: "node1",
start: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
return g
}(),
expect: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
return g
}(),
},
{
desc: "destination vertex does not exist",
fromType: configMapVertexType,
toType: nodeVertexType,
toNamespace: "",
toName: "node1",
start: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
return g
}(),
expect: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
return g
}(),
},
{
desc: "source vertex type doesn't exist",
fromType: configMapVertexType,
toType: nodeVertexType,
toNamespace: "",
toName: "node1",
start: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
return g
}(),
expect: func() *Graph {
g := NewGraph()
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
return g
}(),
},
}
for _, c := range cases {
t.Run(c.desc, func(t *testing.T) {
c.start.deleteEdges_locked(c.fromType, c.toType, c.toNamespace, c.toName)
// Note: We assert on substructures (graph.Nodes(), graph.Edges()) because the graph tracks
// freed IDs for reuse, which results in an irrelevant inequality between start and expect.
// sort the nodes by ID
// (the slices we get back are from map iteration, where order is not guaranteed)
expectNodes := c.expect.graph.Nodes()
sort.Slice(expectNodes, func(i, j int) bool {
return expectNodes[i].ID() < expectNodes[j].ID()
})
startNodes := c.start.graph.Nodes()
sort.Slice(startNodes, func(i, j int) bool {
return startNodes[i].ID() < startNodes[j].ID()
})
assert.Equal(t, expectNodes, startNodes)
// sort the edges by from ID, then to ID
// (the slices we get back are from map iteration, where order is not guaranteed)
expectEdges := c.expect.graph.Edges()
sort.Slice(expectEdges, func(i, j int) bool {
if expectEdges[i].From().ID() == expectEdges[j].From().ID() {
return expectEdges[i].To().ID() < expectEdges[j].To().ID()
}
return expectEdges[i].From().ID() < expectEdges[j].From().ID()
})
startEdges := c.start.graph.Edges()
sort.Slice(startEdges, func(i, j int) bool {
if startEdges[i].From().ID() == startEdges[j].From().ID() {
return startEdges[i].To().ID() < startEdges[j].To().ID()
}
return startEdges[i].From().ID() < startEdges[j].From().ID()
})
assert.Equal(t, expectEdges, startEdges)
// vertices is a recursive map, no need to sort
assert.Equal(t, c.expect.vertices, c.start.vertices)
})
}
}

View File

@ -0,0 +1,62 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
// intSet maintains a set of ints, and supports promoting and culling the previous generation.
// this allows tracking a large, mostly-stable set without constantly reallocating the entire set.
type intSet struct {
currentGeneration byte
members map[int]byte
}
func newIntSet() *intSet {
return &intSet{members: map[int]byte{}}
}
// has returns true if the specified int is in the set.
// it is safe to call concurrently, but must not be called concurrently with any of the other methods.
func (s *intSet) has(i int) bool {
if s == nil {
return false
}
_, present := s.members[i]
return present
}
// startNewGeneration begins a new generation.
// it must be followed by a call to mark() for every member of the generation,
// then a call to sweep() to remove members not present in the generation.
// it is not thread-safe.
func (s *intSet) startNewGeneration() {
s.currentGeneration++
}
// mark indicates the specified int belongs to the current generation.
// it is not thread-safe.
func (s *intSet) mark(i int) {
s.members[i] = s.currentGeneration
}
// sweep removes items not in the current generation.
// it is not thread-safe.
func (s *intSet) sweep() {
for k, v := range s.members {
if v != s.currentGeneration {
delete(s.members, k)
}
}
}

View File

@ -0,0 +1,62 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestIntSet(t *testing.T) {
i := newIntSet()
assert.False(t, i.has(1))
assert.False(t, i.has(2))
assert.False(t, i.has(3))
assert.False(t, i.has(4))
i.startNewGeneration()
i.mark(1)
i.mark(2)
i.sweep()
assert.True(t, i.has(1))
assert.True(t, i.has(2))
assert.False(t, i.has(3))
assert.False(t, i.has(4))
i.startNewGeneration()
i.mark(2)
i.mark(3)
i.sweep()
assert.False(t, i.has(1))
assert.True(t, i.has(2))
assert.True(t, i.has(3))
assert.False(t, i.has(4))
i.startNewGeneration()
i.mark(3)
i.mark(4)
i.sweep()
assert.False(t, i.has(1))
assert.False(t, i.has(2))
assert.True(t, i.has(3))
assert.True(t, i.has(4))
}

View File

@ -21,11 +21,11 @@ import (
"github.com/golang/glog"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/authorization/authorizer"
utilfeature "k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
storageapi "k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/pkg/features"
@ -38,6 +38,7 @@ import (
// 1. If a request is not from a node (NodeIdentity() returns isNode=false), reject
// 2. If a specific node cannot be identified (NodeIdentity() returns nodeName=""), reject
// 3. If a request is for a secret, configmap, persistent volume or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node:
// node <- configmap
// node <- pod
// node <- pod <- secret
// node <- pod <- configmap
@ -48,14 +49,14 @@ import (
type NodeAuthorizer struct {
graph *Graph
identifier nodeidentifier.NodeIdentifier
- nodeRules []rbacapi.PolicyRule
nodeRules []rbacv1.PolicyRule
// allows overriding for testing
features utilfeature.FeatureGate
}
// NewAuthorizer returns a new node authorizer
- func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacapi.PolicyRule) authorizer.Authorizer {
func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacv1.PolicyRule) authorizer.Authorizer {
return &NodeAuthorizer{
graph: graph,
identifier: identifier,
@ -90,9 +91,9 @@ func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (authorizer.Deci
requestResource := schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()}
switch requestResource {
case secretResource:
- return r.authorizeGet(nodeName, secretVertexType, attrs)
return r.authorizeReadNamespacedObject(nodeName, secretVertexType, attrs)
case configMapResource:
- return r.authorizeGet(nodeName, configMapVertexType, attrs)
return r.authorizeReadNamespacedObject(nodeName, configMapVertexType, attrs)
case pvcResource:
if r.features.Enabled(features.ExpandPersistentVolumes) {
if attrs.GetSubresource() == "status" {
@ -153,6 +154,24 @@ func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType,
return r.authorize(nodeName, startingType, attrs)
}
// authorizeReadNamespacedObject authorizes "get", "list" and "watch" requests to single objects of a
// specified type if they are related to the specified node.
func (r *NodeAuthorizer) authorizeReadNamespacedObject(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
if attrs.GetVerb() != "get" && attrs.GetVerb() != "list" && attrs.GetVerb() != "watch" {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "can only read resources of this type", nil
}
if len(attrs.GetSubresource()) > 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "cannot read subresource", nil
}
if len(attrs.GetNamespace()) == 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "can only read namespaced object of this type", nil
}
return r.authorize(nodeName, startingType, attrs)
}
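In terms of the tests further below, the pre-checks sort requests roughly like this (a hedged sketch; node0 and authz as in the test file, return values elided):

// single named object with a read verb: proceeds to the graph check
authz.Authorize(authorizer.AttributesRecord{User: node0, ResourceRequest: true,
	Verb: "watch", Resource: "secrets", Name: "secret1", Namespace: "ns1"})

// no name: a list/watch across many objects, rejected before the graph check
authz.Authorize(authorizer.AttributesRecord{User: node0, ResourceRequest: true,
	Verb: "list", Resource: "secrets", Name: "", Namespace: "ns1"})

// subresource requests and cluster-scoped reads (empty namespace) are likewise rejected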
func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
if len(attrs.GetName()) == 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
@ -211,6 +230,11 @@ func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, s
return false, fmt.Errorf("node %q cannot get unknown %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName)
}
// Fast check to see if we know of a destination edge
if r.graph.destinationEdgeIndex[startingVertex.ID()].has(nodeVertex.ID()) {
return true, nil
}
found := false
traversal := &traverse.VisitingDepthFirst{
EdgeFilter: func(edge graph.Edge) bool {

View File

@ -20,12 +20,15 @@ import (
"fmt"
"runtime"
"runtime/pprof"
"sync/atomic"
"testing"
"time"
"os"
storagev1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -72,8 +75,8 @@ func TestAuthorizer(t *testing.T) {
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
- pods, pvs, attachments := generate(opts)
- populate(g, pods, pvs, attachments)
nodes, pods, pvs, attachments := generate(opts)
populate(g, nodes, pods, pvs, attachments)
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules()).(*NodeAuthorizer)
@ -86,6 +89,11 @@ func TestAuthorizer(t *testing.T) {
expect authorizer.Decision
features utilfeature.FeatureGate
}{
{
name: "allowed node configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node0-configmap", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node0", Namespace: "ns0"},
@ -96,6 +104,31 @@ func TestAuthorizer(t *testing.T) {
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "list allowed secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "watch allowed secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "disallowed list many secrets",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "secrets", Name: "", Namespace: "ns0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed watch many secrets",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "secrets", Name: "", Namespace: "ns0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed list secrets from all namespaces with name",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: ""},
expect: authorizer.DecisionNoOpinion,
},
{
name: "allowed shared secret via pod",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-shared", Namespace: "ns0"},
@ -117,6 +150,11 @@ func TestAuthorizer(t *testing.T) {
expect: authorizer.DecisionAllow,
},
{
name: "disallowed node configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node1-configmap", Namespace: "ns0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node1", Namespace: "ns0"},
@ -209,6 +247,7 @@ func TestAuthorizer(t *testing.T) {
func TestAuthorizerSharedResources(t *testing.T) {
g := NewGraph()
g.destinationEdgeThreshold = 1
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
@ -237,7 +276,8 @@ func TestAuthorizerSharedResources(t *testing.T) {
},
},
})
g.AddPod(&api.Pod{
pod3 := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod3-node3", Namespace: "ns1"},
Spec: api.PodSpec{
NodeName: "node3",
@ -245,11 +285,17 @@ func TestAuthorizerSharedResources(t *testing.T) {
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "shared-all"}}},
},
},
- })
}
g.AddPod(pod3)
g.SetNodeConfigMap("node1", "shared-configmap", "ns1")
g.SetNodeConfigMap("node2", "shared-configmap", "ns1")
g.SetNodeConfigMap("node3", "configmap", "ns1")
testcases := []struct {
User user.Info
Secret string
ConfigMap string
ExpectAllowed bool
}{
{User: node1, ExpectAllowed: true, Secret: "node1-only"},
@ -263,18 +309,67 @@ func TestAuthorizerSharedResources(t *testing.T) {
{User: node3, ExpectAllowed: false, Secret: "node1-only"},
{User: node3, ExpectAllowed: false, Secret: "node1-node2-only"},
{User: node3, ExpectAllowed: true, Secret: "shared-all"},
{User: node1, ExpectAllowed: true, ConfigMap: "shared-configmap"},
{User: node1, ExpectAllowed: false, ConfigMap: "configmap"},
{User: node2, ExpectAllowed: true, ConfigMap: "shared-configmap"},
{User: node2, ExpectAllowed: false, ConfigMap: "configmap"},
{User: node3, ExpectAllowed: false, ConfigMap: "shared-configmap"},
{User: node3, ExpectAllowed: true, ConfigMap: "configmap"},
}
for i, tc := range testcases {
- decision, _, err := authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: tc.Secret})
- if err != nil {
- t.Errorf("%d: unexpected error: %v", i, err)
- continue
- }
var (
decision authorizer.Decision
err error
)
if len(tc.Secret) > 0 {
decision, _, err = authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: tc.Secret})
if err != nil {
t.Errorf("%d: unexpected error: %v", i, err)
continue
}
} else if len(tc.ConfigMap) > 0 {
decision, _, err = authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "configmaps", Namespace: "ns1", Name: tc.ConfigMap})
if err != nil {
t.Errorf("%d: unexpected error: %v", i, err)
continue
}
} else {
t.Fatalf("test case must include a request for a Secret or ConfigMap")
}
if (decision == authorizer.DecisionAllow) != tc.ExpectAllowed {
t.Errorf("%d: expected %v, got %v", i, tc.ExpectAllowed, decision)
}
}
{
node3SharedSecretGet := authorizer.AttributesRecord{User: node3, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: "shared-all"}
decision, _, err := authz.Authorize(node3SharedSecretGet)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if decision != authorizer.DecisionAllow {
t.Error("expected allowed")
}
// should trigger recalculation of the shared secret index
pod3.Spec.Volumes = nil
g.AddPod(pod3)
decision, _, err = authz.Authorize(node3SharedSecretGet)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if decision == authorizer.DecisionAllow {
t.Errorf("unexpectedly allowed")
}
}
}
type sampleDataOpts struct {
@ -309,12 +404,12 @@ func BenchmarkPopulationAllocation(b *testing.B) {
uniquePVCsPerPod: 1,
}
- pods, pvs, attachments := generate(opts)
nodes, pods, pvs, attachments := generate(opts)
b.ResetTimer()
for i := 0; i < b.N; i++ {
g := NewGraph()
- populate(g, pods, pvs, attachments)
populate(g, nodes, pods, pvs, attachments)
}
}
@ -340,14 +435,14 @@ func BenchmarkPopulationRetention(b *testing.B) {
uniquePVCsPerPod: 1,
}
- pods, pvs, attachments := generate(opts)
nodes, pods, pvs, attachments := generate(opts)
// Garbage collect before the first iteration
runtime.GC()
b.ResetTimer()
for i := 0; i < b.N; i++ {
g := NewGraph()
- populate(g, pods, pvs, attachments)
populate(g, nodes, pods, pvs, attachments)
if i == 0 {
f, _ := os.Create("BenchmarkPopulationRetention.profile")
@ -360,10 +455,47 @@ func BenchmarkPopulationRetention(b *testing.B) {
}
}
func BenchmarkWriteIndexMaintenance(b *testing.B) {
// Run with:
// go test ./plugin/pkg/auth/authorizer/node -benchmem -bench BenchmarkWriteIndexMaintenance -run None
opts := sampleDataOpts{
// simulate high replication in a small number of namespaces:
nodes: 5000,
namespaces: 1,
podsPerNode: 1,
attachmentsPerNode: 20,
sharedConfigMapsPerPod: 0,
uniqueConfigMapsPerPod: 1,
sharedSecretsPerPod: 1,
uniqueSecretsPerPod: 1,
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
nodes, pods, pvs, attachments := generate(opts)
g := NewGraph()
populate(g, nodes, pods, pvs, attachments)
// Garbage collect before the first iteration
runtime.GC()
b.ResetTimer()
b.SetParallelism(100)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
g.AddPod(pods[0])
}
})
}
func BenchmarkAuthorization(b *testing.B) {
g := NewGraph()
opts := sampleDataOpts{
// To simulate high replication in a small number of namespaces:
// nodes: 5000,
// namespaces: 10,
// podsPerNode: 10,
nodes: 500,
namespaces: 200,
podsPerNode: 200,
@ -375,8 +507,8 @@ func BenchmarkAuthorization(b *testing.B) {
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
- pods, pvs, attachments := generate(opts)
- populate(g, pods, pvs, attachments)
nodes, pods, pvs, attachments := generate(opts)
populate(g, nodes, pods, pvs, attachments)
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules()).(*NodeAuthorizer)
@ -389,6 +521,11 @@ func BenchmarkAuthorization(b *testing.B) {
expect authorizer.Decision
features utilfeature.FeatureGate
}{
{
name: "allowed node configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node0-configmap", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node0", Namespace: "ns0"},
@ -404,6 +541,12 @@ func BenchmarkAuthorization(b *testing.B) {
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-shared", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "disallowed node configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node1-configmap", Namespace: "ns0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node1", Namespace: "ns0"},
@ -450,26 +593,102 @@ func BenchmarkAuthorization(b *testing.B) {
}
b.ResetTimer()
- for _, tc := range tests {
- if tc.features == nil {
- authz.features = utilfeature.DefaultFeatureGate
- } else {
- authz.features = tc.features
- }
- b.Run(tc.name, func(b *testing.B) {
- for i := 0; i < b.N; i++ {
- decision, _, _ := authz.Authorize(tc.attrs)
- if decision != tc.expect {
- b.Errorf("expected %v, got %v", tc.expect, decision)
- }
- }
- })
- }
for _, testWriteContention := range []bool{false, true} {
shouldWrite := int32(1)
writes := int64(0)
_1ms := int64(0)
_10ms := int64(0)
_25ms := int64(0)
_50ms := int64(0)
_100ms := int64(0)
_250ms := int64(0)
_500ms := int64(0)
_1000ms := int64(0)
_1s := int64(0)
contentionPrefix := ""
if testWriteContention {
contentionPrefix = "contentious "
// Start a writer pushing graph modifications 100x a second
go func() {
for shouldWrite == 1 {
go func() {
start := time.Now()
authz.graph.AddPod(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testwrite", Namespace: "ns0"},
Spec: api.PodSpec{
NodeName: "node0",
ServiceAccountName: "default",
Volumes: []api.Volume{
{Name: "token", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "secret0-shared"}}},
},
},
})
diff := time.Now().Sub(start)
atomic.AddInt64(&writes, 1)
switch {
case diff < time.Millisecond:
atomic.AddInt64(&_1ms, 1)
case diff < 10*time.Millisecond:
atomic.AddInt64(&_10ms, 1)
case diff < 25*time.Millisecond:
atomic.AddInt64(&_25ms, 1)
case diff < 50*time.Millisecond:
atomic.AddInt64(&_50ms, 1)
case diff < 100*time.Millisecond:
atomic.AddInt64(&_100ms, 1)
case diff < 250*time.Millisecond:
atomic.AddInt64(&_250ms, 1)
case diff < 500*time.Millisecond:
atomic.AddInt64(&_500ms, 1)
case diff < 1000*time.Millisecond:
atomic.AddInt64(&_1000ms, 1)
default:
atomic.AddInt64(&_1s, 1)
}
}()
time.Sleep(10 * time.Millisecond)
}
}()
}
for _, tc := range tests {
if tc.features == nil {
authz.features = utilfeature.DefaultFeatureGate
} else {
authz.features = tc.features
}
b.Run(contentionPrefix+tc.name, func(b *testing.B) {
// Run authorization checks in parallel
b.SetParallelism(5000)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
decision, _, _ := authz.Authorize(tc.attrs)
if decision != tc.expect {
b.Errorf("expected %v, got %v", tc.expect, decision)
}
}
})
})
}
atomic.StoreInt32(&shouldWrite, 0)
if testWriteContention {
b.Logf("graph modifications during contention test: %d", writes)
b.Logf("<1ms=%d, <10ms=%d, <25ms=%d, <50ms=%d, <100ms=%d, <250ms=%d, <500ms=%d, <1000ms=%d, >1000ms=%d", _1ms, _10ms, _25ms, _50ms, _100ms, _250ms, _500ms, _1000ms, _1s)
} else {
b.Logf("graph modifications during non-contention test: %d", writes)
}
}
}
- func populate(graph *Graph, pods []*api.Pod, pvs []*api.PersistentVolume, attachments []*storagev1beta1.VolumeAttachment) {
func populate(graph *Graph, nodes []*api.Node, pods []*api.Pod, pvs []*api.PersistentVolume, attachments []*storagev1beta1.VolumeAttachment) {
p := &graphPopulator{}
p.graph = graph
for _, node := range nodes {
p.addNode(node)
}
for _, pod := range pods {
p.addPod(pod)
}
@ -485,7 +704,8 @@ func populate(graph *Graph, pods []*api.Pod, pvs []*api.PersistentVolume, attach
// the secret/configmap/pvc/node references in the pod and pv objects are named to indicate the connections between the objects.
// for example, secret0-pod0-node0 is a secret referenced by pod0 which is bound to node0.
// when populated into the graph, the node authorizer should allow node0 to access that secret, but not node1.
- func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume, []*storagev1beta1.VolumeAttachment) {
func generate(opts sampleDataOpts) ([]*api.Node, []*api.Pod, []*api.PersistentVolume, []*storagev1beta1.VolumeAttachment) {
nodes := make([]*api.Node, 0, opts.nodes)
pods := make([]*api.Pod, 0, opts.nodes*opts.podsPerNode)
pvs := make([]*api.PersistentVolume, 0, (opts.nodes*opts.podsPerNode*opts.uniquePVCsPerPod)+(opts.sharedPVCsPerPod*opts.namespaces))
attachments := make([]*storagev1beta1.VolumeAttachment, 0, opts.nodes*opts.attachmentsPerNode)
@ -552,6 +772,21 @@ func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume, []*stor
attachment.Spec.NodeName = nodeName
attachments = append(attachments, attachment)
}
name := fmt.Sprintf("%s-configmap", nodeName)
nodes = append(nodes, &api.Node{
ObjectMeta: metav1.ObjectMeta{Name: nodeName},
Spec: api.NodeSpec{
ConfigSource: &api.NodeConfigSource{
ConfigMap: &api.ConfigMapNodeConfigSource{
Name: name,
Namespace: "ns0",
UID: types.UID(fmt.Sprintf("ns0-%s", name)),
KubeletConfigKey: "kubelet",
},
},
},
})
}
- return pods, pvs, attachments
return nodes, pods, pvs, attachments
}

View File

@ -14,14 +14,15 @@ go_library(
],
importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac",
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/client/listers/rbac/internalversion:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library",
],
)
@ -33,9 +34,10 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",

View File

@ -15,9 +15,10 @@ go_library(
],
importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy",
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/features:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -37,8 +38,8 @@ go_test(
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/install:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",

View File

@ -21,15 +21,16 @@ import (
"github.com/golang/glog"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/features"
)
const saRolePrefix = "system:controller:"
- func addControllerRole(controllerRoles *[]rbac.ClusterRole, controllerRoleBindings *[]rbac.ClusterRoleBinding, role rbac.ClusterRole) {
func addControllerRole(controllerRoles *[]rbacv1.ClusterRole, controllerRoleBindings *[]rbacv1.ClusterRoleBinding, role rbacv1.ClusterRole) {
if !strings.HasPrefix(role.Name, saRolePrefix) {
glog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix)
}
@ -44,317 +45,313 @@ func addControllerRole(controllerRoles *[]rbac.ClusterRole, controllerRoleBindin
addClusterRoleLabel(*controllerRoles)
*controllerRoleBindings = append(*controllerRoleBindings,
- rbac.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie())
rbacv1helpers.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie())
addClusterRoleBindingLabel(*controllerRoleBindings)
}
- func eventsRule() rbac.PolicyRule {
- return rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie()
func eventsRule() rbacv1.PolicyRule {
return rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie()
}
- func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) {
func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) {
// controllerRoles is a slice of roles used for controllers
- controllerRoles := []rbac.ClusterRole{}
controllerRoles := []rbacv1.ClusterRole{}
// controllerRoleBindings is a slice of roles used for controllers
- controllerRoleBindings := []rbac.ClusterRoleBinding{}
controllerRoleBindings := []rbacv1.ClusterRoleBinding{}
addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbac.ClusterRole {
role := rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
role := rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
- rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
- rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
- rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
}
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
- role.Rules = append(role.Rules, rbac.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
}
return role
}())
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "clusterrole-aggregation-controller"},
- Rules: []rbac.PolicyRule{
Rules: []rbacv1.PolicyRule{
// this controller must have full permissions to allow it to mutate any role in any way
rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
rbac.NewRule("*").URLs("*").RuleOrDie(),
rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cronjob-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(),
- rbac.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(),
- rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(),
- rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(),
- rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(),
- rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(),
- rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(),
- rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
- rbac.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
- rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(),
- rbac.NewRule("list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "deployment-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
- rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(),
- rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(),
- rbac.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
// TODO: remove "update" once
// https://github.com/kubernetes/kubernetes/issues/36897 is resolved.
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "disruption-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
- rbac.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
- rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
- rbac.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
- rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
- rbac.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpoint-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(),
- rbac.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
- rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(),
eventsRule(),
},
})
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "expand-controller"},
- Rules: []rbac.PolicyRule{
- rbac.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
- rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
- rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
// glusterfs
rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
rbac.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
eventsRule(),
},
})
}
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
Rules: []rbacv1.PolicyRule{
// the GC controller needs to run list/watches, selective gets, and updates against any resource
rbac.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(),
rbacv1helpers.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(),
rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(),
// TODO: restrict this to the appropriate namespace
rbac.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(),
// allow listing resource metrics and custom metrics
rbac.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(),
rbac.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(),
rbacv1helpers.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "job-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "namespace-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
// used for pod eviction
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
// glusterfs
rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
rbac.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
rbacv1helpers.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
// openstack
rbac.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
// recyclerClient.WatchPod
rbac.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replicaset-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replication-controller"},
Rules: []rbacv1.PolicyRule{
// 1.0 controllers needed get and update, so without these verbs, old controllers break on new servers
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(),
rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(),
rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"},
Rules: []rbacv1.PolicyRule{
// quota can count quota on anything for reconciliation, so it needs full viewing powers
rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
rbac.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "route-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-account-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("get", "create", "delete", "update", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("get", "create", "delete", "update", "patch", "list", "watch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
rbacv1helpers.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ttl-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("update", "patch", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "certificate-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pv-protection-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
eventsRule(),
},
})
return controllerRoles, controllerRoleBindings
}
// ControllerRoles returns the cluster roles used by controllers
func ControllerRoles() []rbacv1.ClusterRole {
controllerRoles, _ := buildControllerRoles()
return controllerRoles
}
// ControllerRoleBindings returns the role bindings used by controllers
func ControllerRoleBindings() []rbacv1.ClusterRoleBinding {
_, controllerRoleBindings := buildControllerRoles()
return controllerRoleBindings
}
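A minimal consumer sketch (not part of this commit) exercising the two accessors above; the "system:controller:" prefix is an assumption based on how saRolePrefix is used throughout this file.

package main

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

func main() {
	roles := bootstrappolicy.ControllerRoles()
	bindings := bootstrappolicy.ControllerRoleBindings()
	// Each controller role is generated together with a binding to the
	// matching controller service account in kube-system.
	for _, r := range roles {
		if !strings.HasPrefix(r.Name, "system:controller:") {
			fmt.Println("unexpected controller role name:", r.Name)
		}
	}
	fmt.Printf("%d controller roles, %d role bindings\n", len(roles), len(bindings))
}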

View File

@ -21,19 +21,20 @@ import (
"github.com/golang/glog"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)
var (
// namespaceRoles is a map of namespace to slice of roles to create
namespaceRoles = map[string][]rbacv1.Role{}
// namespaceRoleBindings is a map of namespace to slice of roleBindings to create
namespaceRoleBindings = map[string][]rbacv1.RoleBinding{}
)
func addNamespaceRole(namespace string, role rbacv1.Role) {
if !strings.HasPrefix(namespace, "kube-") {
glog.Fatalf(`roles can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace)
}
@ -51,7 +52,7 @@ func addNamespaceRole(namespace string, role rbac.Role) {
namespaceRoles[namespace] = existingRoles
}
func addNamespaceRoleBinding(namespace string, roleBinding rbacv1.RoleBinding) {
if !strings.HasPrefix(namespace, "kube-") {
glog.Fatalf(`rolebindings can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace)
}
@ -70,86 +71,86 @@ func addNamespaceRoleBinding(namespace string, roleBinding rbac.RoleBinding) {
}
func init() {
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
// role for finding authentication config info for starting a server
ObjectMeta: metav1.ObjectMeta{Name: "extension-apiserver-authentication-reader"},
Rules: []rbacv1.PolicyRule{
// this particular config map is exposed and contains authentication configuration information
rbac.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
},
})
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
// role for the bootstrap signer to be able to inspect kube-system secrets
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
},
})
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
// role for the cloud providers to access/create kube-system configmaps
// Deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy.
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cloud-provider"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
},
})
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
// role for the token-cleaner to be able to remove secrets, but only in kube-system
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "token-cleaner"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
eventsRule(),
},
})
// TODO: Create util on Role+Binding for leader locking if more cases evolve.
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
// role for the leader locking on supplied configmap
ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-controller-manager"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-controller-manager").RuleOrDie(),
},
})
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
// role for the leader locking on supplied configmap
ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-scheduler"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-scheduler").RuleOrDie(),
},
})
addNamespaceRoleBinding(metav1.NamespaceSystem,
rbac.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie())
rbacv1helpers.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie())
addNamespaceRoleBinding(metav1.NamespaceSystem,
rbac.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie())
rbacv1helpers.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie())
addNamespaceRoleBinding(metav1.NamespaceSystem,
rbac.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
// cloud-provider is deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy.
addNamespaceRoleBinding(metav1.NamespaceSystem,
rbac.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie())
rbacv1helpers.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie())
addNamespaceRoleBinding(metav1.NamespaceSystem,
rbac.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie())
rbacv1helpers.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie())
addNamespaceRole(metav1.NamespacePublic, rbacv1.Role{
// role for the bootstrap signer to be able to write its configmap
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("configmaps").Names("cluster-info").RuleOrDie(),
eventsRule(),
},
})
addNamespaceRoleBinding(metav1.NamespacePublic,
rbac.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
}
// NamespaceRoles returns a map of namespace to slice of roles to create
func NamespaceRoles() map[string][]rbacv1.Role {
return namespaceRoles
}
// NamespaceRoleBindings returns a map of namespace to slice of role bindings to create
func NamespaceRoleBindings() map[string][]rbacv1.RoleBinding {
return namespaceRoleBindings
}
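A small usage sketch (not from the tree) of the two maps returned above:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

func main() {
	// Keys can only be "kube-" namespaces; addNamespaceRole and
	// addNamespaceRoleBinding enforce that at init time.
	for ns, roles := range bootstrappolicy.NamespaceRoles() {
		fmt.Printf("%s: %d bootstrap roles\n", ns, len(roles))
	}
	// Each binding references the same-named Role in its namespace.
	for _, rb := range bootstrappolicy.NamespaceRoleBindings()[metav1.NamespaceSystem] {
		fmt.Printf("%s -> Role %q\n", rb.Name, rb.RoleRef.Name)
	}
}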

View File

@ -17,12 +17,13 @@ limitations under the License.
package bootstrappolicy
import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/authentication/user"
utilfeature "k8s.io/apiserver/pkg/util/feature"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/features"
)
@ -32,7 +33,7 @@ var (
ReadUpdate = []string{"get", "list", "watch", "update", "patch"}
Label = map[string]string{"kubernetes.io/bootstrapping": "rbac-defaults"}
Annotation = map[string]string{rbacv1.AutoUpdateAnnotationKey: "true"}
)
const (
@ -78,105 +79,105 @@ func addDefaultMetadata(obj runtime.Object) {
metadata.SetAnnotations(annotations)
}
func addClusterRoleLabel(roles []rbacv1.ClusterRole) {
for i := range roles {
addDefaultMetadata(&roles[i])
}
return
}
func addClusterRoleBindingLabel(rolebindings []rbacv1.ClusterRoleBinding) {
for i := range rolebindings {
addDefaultMetadata(&rolebindings[i])
}
return
}
func NodeRules() []rbacv1.PolicyRule {
nodePolicyRules := []rbacv1.PolicyRule{
// Needed to check API access. These creates are non-mutating
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
// Needed to build serviceLister, to populate env vars for services
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(),
// Nodes can register Node API objects and report status.
// Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object.
rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
rbac.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
// TODO: restrict to the bound node as creator in the NodeRestrictions admission plugin
rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
// TODO: restrict to pods scheduled on the bound node once field selectors are supported by list/watch authorization
rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(),
// Needed for the node to create/delete mirror pods.
// Use the NodeRestriction admission plugin to limit a node to creating/deleting mirror pods bound to itself.
rbac.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
// Needed for the node to report status of pods it is running.
// Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself.
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
// Needed for the node to create pod evictions.
// Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself.
rbac.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
// Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs
// Needed for configmap volume and envs
// Use the Node authorization mode to limit a node to get secrets/configmaps referenced by pods bound to itself.
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(),
// Needed for persistent volumes
// Use the Node authorization mode to limit a node to get pv/pvc objects referenced by pods bound to itself.
rbac.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
// TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node
// Needed for glusterfs volumes
rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
// Used to create a certificatesigningrequest for a node-specific client certificate, and watch
// for it to be signed. This allows the kubelet to rotate its own certificate.
rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
}
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
// Use the Node authorization mode to limit a node to update status of pvc objects referenced by pods bound to itself.
// Use the NodeRestriction admission plugin to limit a node to just update the status stanza.
pvcStatusPolicyRule := rbacv1helpers.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie()
nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule)
}
if utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) {
// Use the Node authorization to limit a node to create tokens for service accounts running on that node
// Use the NodeRestriction admission plugin to limit a node to create tokens bound to pods on that node
tokenRequestRule := rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie()
nodePolicyRules = append(nodePolicyRules, tokenRequestRule)
}
// CSI
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
volAttachRule := rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()
nodePolicyRules = append(nodePolicyRules, volAttachRule)
}
return nodePolicyRules
}
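// exampleNodeClusterRole is an illustrative helper, not part of the vendored
// file: it shows how the NodeRules() slice above could be wrapped into the
// "system:node" ClusterRole. The real call site lives outside this hunk and
// may differ.
func exampleNodeClusterRole() rbacv1.ClusterRole {
	return rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "system:node"},
		Rules:      NodeRules(),
	}
}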
// ClusterRoles returns the cluster roles to bootstrap an API server with
func ClusterRoles() []rbacv1.ClusterRole {
roles := []rbacv1.ClusterRole{
{
// a "root" role which can do absolutely anything
ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(),
},
},
{
// a role which provides just enough power to determine if the server is ready and discover API versions for negotiation
ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get").URLs(
"/healthz", "/version", "/version/",
// remove once swagger 1.2 support is removed
"/swaggerapi", "/swaggerapi/*",
@ -192,16 +193,16 @@ func ClusterRoles() []rbac.ClusterRole {
{
// a role which provides minimal resource access to allow a "normal" user to learn information about themselves
ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"},
Rules: []rbacv1.PolicyRule{
// TODO add future selfsubjectrulesreview, project request APIs, project listing APIs
rbac.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
},
},
{
// a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
ObjectMeta: metav1.ObjectMeta{Name: "admin"},
AggregationRule: &rbacv1.AggregationRule{
ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}},
},
},
@ -210,7 +211,7 @@ func ClusterRoles() []rbac.ClusterRole {
// It does not grant powers for "privileged" resources which are domain of the system: `/status`
// subresources or `quota`/`limits` which are used to control namespaces
ObjectMeta: metav1.ObjectMeta{Name: "edit"},
AggregationRule: &rbacv1.AggregationRule{
ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}},
},
},
@ -218,45 +219,46 @@ func ClusterRoles() []rbac.ClusterRole {
// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
// a namespace.
ObjectMeta: metav1.ObjectMeta{Name: "view"},
AggregationRule: &rbacv1.AggregationRule{
ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}},
},
},
{
// a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
"services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(),
rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
// indicator of which namespaces you have access to.
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets",
rbacv1helpers.NewRule(ReadWrite...).Groups(appsGroup).Resources(
"statefulsets", "statefulsets/scale",
"daemonsets",
"deployments", "deployments/scale", "deployments/rollback",
"replicasets", "replicasets/scale").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
"deployments", "deployments/scale", "deployments/rollback", "ingresses",
"replicasets", "replicasets/scale", "replicationcontrollers/scale",
"networkpolicies").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
// additional admin powers
rbac.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(),
rbac.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(),
},
},
{
@ -264,73 +266,75 @@ func ClusterRoles() []rbac.ClusterRole {
// It does not grant powers for "privileged" resources which are domain of the system: `/status`
// subresources or `quota`/`limits` which are used to control namespaces
ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
"services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(),
rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
// indicator of which namespaces you have access to.
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets",
rbacv1helpers.NewRule(ReadWrite...).Groups(appsGroup).Resources(
"statefulsets", "statefulsets/scale",
"daemonsets",
"deployments", "deployments/scale", "deployments/rollback",
"replicasets", "replicasets/scale").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
"deployments", "deployments/scale", "deployments/rollback", "ingresses",
"replicasets", "replicasets/scale", "replicationcontrollers/scale",
"networkpolicies").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
},
},
{
// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
// a namespace.
ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
"services", "endpoints", "persistentvolumeclaims", "configmaps").RuleOrDie(),
rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
// indicator of which namespaces you have access to.
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets",
rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources(
"statefulsets", "statefulsets/scale",
"daemonsets",
"deployments", "deployments/scale",
"replicasets", "replicasets/scale").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
"ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale",
"networkpolicies").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
},
},
{
// a role to use for heapster's connections back to the API server
ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(),
},
},
{
@ -342,19 +346,19 @@ func ClusterRoles() []rbac.ClusterRole {
// a role to use for node-problem-detector access. It does not get bound to a default location since
// deployment locations can reasonably vary.
ObjectMeta: metav1.ObjectMeta{Name: "system:node-problem-detector"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
eventsRule(),
},
},
{
// a role to use for setting up a proxy
ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"},
Rules: []rbacv1.PolicyRule{
// Used to build serviceLister
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
eventsRule(),
},
@ -362,142 +366,156 @@ func ClusterRoles() []rbac.ClusterRole {
{
// a role to use for full access to the kubelet API
ObjectMeta: metav1.ObjectMeta{Name: "system:kubelet-api-admin"},
Rules: []rbacv1.PolicyRule{
// Allow read-only access to the Node API objects
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
// Allow all API calls to the nodes
rbac.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbac.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(),
rbacv1helpers.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(),
},
},
{
// a role to use for bootstrapping a node's client certificates
ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"},
Rules: []rbacv1.PolicyRule{
// used to create a certificatesigningrequest for a node-specific client certificate, and watch for it to be signed
rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
},
},
{
// a role to use for allowing authentication and authorization delegation
ObjectMeta: metav1.ObjectMeta{Name: "system:auth-delegator"},
Rules: []rbacv1.PolicyRule{
// These creates are non-mutating
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
},
},
{
// a role to use for the API registry, summarization, and proxy handling
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-aggregator"},
Rules: []rbacv1.PolicyRule{
// it needs to see all services so that it knows whether the ones it points to exist or not
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
},
},
{
// a role to use for bootstrapping the kube-controller-manager so it can create the shared informers,
// service accounts, and secrets that we need to create separate identities for other controllers
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"},
Rules: []rbacv1.PolicyRule{
eventsRule(),
rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
rbac.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(),
rbac.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
// Needed to check API access. These creates are non-mutating
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
// Needed for all shared informers
rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
},
},
{
// a role to use for the kube-scheduler
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
Rules: []rbacv1.PolicyRule{
eventsRule(),
// this is for leaderlease access
// TODO: scope this to the kube-system namespace
rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
rbac.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
rbacv1helpers.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),
// fundamental resources
rbac.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
// things that select pods
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
rbac.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
// things that pods use or that apply to them
rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
rbac.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
},
},
{
// a role to use for the kube-dns pod
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-dns"},
Rules: []rbac.PolicyRule{
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(),
},
},
{
// a role for an external/out-of-tree persistent volume provisioner
ObjectMeta: metav1.ObjectMeta{Name: "system:persistent-volume-provisioner"},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
// update is needed in addition to read access for setting lock annotations on PVCs
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
// Needed for watching provisioning success and failure events
rbac.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
eventsRule(),
},
},
{
// a role for the csi external provisioner
ObjectMeta: metav1.ObjectMeta{Name: "system:csi-external-provisioner"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create", "delete", "get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
},
},
{
// a role for the csi external attacher
ObjectMeta: metav1.ObjectMeta{Name: "system:csi-external-attacher"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "system:aws-cloud-provider"},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
eventsRule(),
},
},
{
// a role allowing the csrapprover controller to approve a node client CSR
ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient"},
Rules: []rbac.PolicyRule{
rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(),
},
},
{
// a role allowing the csrapprover controller to approve a node client CSR requested by the node itself
ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient"},
Rules: []rbac.PolicyRule{
rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(),
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(),
},
},
}
if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
roles = append(roles, rbac.ClusterRole{
// a role allowing the csrapprover controller to approve a node server CSR requested by the node itself
ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeserver"},
Rules: []rbac.PolicyRule{
rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeserver").RuleOrDie(),
},
})
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
roles = append(roles, rbac.ClusterRole{
rules := []rbacv1.PolicyRule{
rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
rules = append(rules, rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie())
}
roles = append(roles, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"},
Rules: []rbac.PolicyRule{
rbac.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
},
Rules: rules,
})
}
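
The Read..., ReadUpdate..., and ReadWrite... shorthands used throughout these rules are package-level verb slices defined elsewhere in bootstrappolicy. A minimal sketch of their likely shape (contents assumed here, not quoted from the package):

package main

import "fmt"

func main() {
	// Hypothetical stand-ins for bootstrappolicy's Read... / ReadUpdate...
	// verb slices; the real definitions live elsewhere in the package.
	read := []string{"get", "list", "watch"}
	readUpdate := append(read, "update", "patch")
	fmt.Println(read, readUpdate)
}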
@ -508,27 +526,27 @@ func ClusterRoles() []rbac.ClusterRole {
const systemNodeRoleName = "system:node"
// ClusterRoleBindings returns the default role bindings to the default roles
func ClusterRoleBindings() []rbac.ClusterRoleBinding {
rolebindings := []rbac.ClusterRoleBinding{
rbac.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(),
rbac.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
rbac.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
rbac.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
rbac.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
rbac.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
rbac.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(),
func ClusterRoleBindings() []rbacv1.ClusterRoleBinding {
rolebindings := []rbacv1.ClusterRoleBinding{
rbacv1helpers.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
rbacv1helpers.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(),
// This default binding of the system:node role to the system:nodes group is deprecated in 1.7 with the availability of the Node authorizer.
// This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject.
{
ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName},
RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName},
RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName},
},
}
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
rolebindings = append(rolebindings, rbac.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie())
rolebindings = append(rolebindings, rbacv1helpers.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie())
}
addClusterRoleBindingLabel(rolebindings)
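
The NewClusterBinding chains above build plain rbacv1 objects. A minimal sketch of what one chain is assumed to evaluate to (an illustrative literal, not the helper's verbatim output):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Roughly what NewClusterBinding("system:kube-scheduler").
	// Users("system:kube-scheduler").BindingOrDie() yields.
	b := rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
		RoleRef:    rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: "system:kube-scheduler"},
		Subjects: []rbacv1.Subject{
			{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "system:kube-scheduler"},
		},
	}
	fmt.Printf("%#v\n", b)
}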


@ -34,8 +34,8 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/apis/rbac"
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)
@ -43,12 +43,12 @@ import (
// semanticRoles enumerates a few roles for which the relationships are well established
// and for which we want to maintain symmetry
type semanticRoles struct {
admin *rbac.ClusterRole
edit *rbac.ClusterRole
view *rbac.ClusterRole
admin *rbacv1.ClusterRole
edit *rbacv1.ClusterRole
view *rbacv1.ClusterRole
}
func getSemanticRoles(roles []rbac.ClusterRole) semanticRoles {
func getSemanticRoles(roles []rbacv1.ClusterRole) semanticRoles {
ret := semanticRoles{}
for i := range roles {
role := roles[i]
@ -81,10 +81,10 @@ func TestCovers(t *testing.T) {
// additionalAdminPowers is the list of powers that we expect to differ from the edit role.
// one resource per rule to make the "does not already contain" check easy
var additionalAdminPowers = []rbac.PolicyRule{
rbac.NewRule("create").Groups("authorization.k8s.io").Resources("localsubjectaccessreviews").RuleOrDie(),
rbac.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
rbac.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("roles").RuleOrDie(),
var additionalAdminPowers = []rbacv1.PolicyRule{
rbacv1helpers.NewRule("create").Groups("authorization.k8s.io").Resources("localsubjectaccessreviews").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("roles").RuleOrDie(),
}
func TestAdminEditRelationship(t *testing.T) {
@ -92,7 +92,7 @@ func TestAdminEditRelationship(t *testing.T) {
// confirm that the edit role doesn't already have extra powers
for _, rule := range additionalAdminPowers {
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.edit.Rules, []rbac.PolicyRule{rule}); covers {
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.edit.Rules, []rbacv1.PolicyRule{rule}); covers {
t.Errorf("edit has extra powers: %#v", rule)
}
}
@ -109,19 +109,19 @@ func TestAdminEditRelationship(t *testing.T) {
// viewEscalatingNamespaceResources is the list of rules that would allow privilege escalation attacks based on
// the ability to view (GET) them
var viewEscalatingNamespaceResources = []rbac.PolicyRule{
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/attach").RuleOrDie(),
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/proxy").RuleOrDie(),
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/exec").RuleOrDie(),
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/portforward").RuleOrDie(),
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("secrets").RuleOrDie(),
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("services/proxy").RuleOrDie(),
var viewEscalatingNamespaceResources = []rbacv1.PolicyRule{
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/attach").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/proxy").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/exec").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/portforward").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("secrets").RuleOrDie(),
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("services/proxy").RuleOrDie(),
}
// ungettableResources is the list of rules that do not allow viewing (GET) the resources
// this is purposefully a separate list to distinguish it from escalating privileges
var ungettableResources = []rbac.PolicyRule{
rbac.NewRule(bootstrappolicy.Read...).Groups("apps", "extensions").Resources("deployments/rollback").RuleOrDie(),
var ungettableResources = []rbacv1.PolicyRule{
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("apps", "extensions").Resources("deployments/rollback").RuleOrDie(),
}
func TestEditViewRelationship(t *testing.T) {
@ -143,7 +143,7 @@ func TestEditViewRelationship(t *testing.T) {
// confirm that the view role doesn't already have extra powers
for _, rule := range viewEscalatingNamespaceResources {
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbac.PolicyRule{rule}); covers {
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbacv1.PolicyRule{rule}); covers {
t.Errorf("view has extra powers: %#v", rule)
}
}
@ -151,7 +151,7 @@ func TestEditViewRelationship(t *testing.T) {
// confirm that the view role doesn't have ungettable resources
for _, rule := range ungettableResources {
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbac.PolicyRule{rule}); covers {
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbacv1.PolicyRule{rule}); covers {
t.Errorf("view has ungettable resource: %#v", rule)
}
}
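
The Covers checks in these tests exercise rule subsumption. A standalone sketch of the call shape, assuming the post-migration rbacv1 signature used above:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
	rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)

func main() {
	owner := []rbacv1.PolicyRule{
		rbacv1helpers.NewRule("get", "list", "watch").Groups("").Resources("pods").RuleOrDie(),
	}
	servant := []rbacv1.PolicyRule{
		rbacv1helpers.NewRule("get").Groups("").Resources("pods").RuleOrDie(),
	}
	// covers is true when every servant rule is subsumed by some owner rule;
	// uncovered lists the servant rules that are not.
	covers, uncovered := rbacregistryvalidation.Covers(owner, servant)
	fmt.Println(covers, uncovered) // true []
}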


@ -138,7 +138,6 @@ items:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects: null
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:


@ -137,6 +137,7 @@ items:
- replicasets
- replicasets/scale
- statefulsets
- statefulsets/scale
verbs:
- create
- delete
@ -329,6 +330,7 @@ items:
- replicasets
- replicasets/scale
- statefulsets
- statefulsets/scale
verbs:
- create
- delete
@ -471,6 +473,7 @@ items:
- replicasets
- replicasets/scale
- statefulsets
- statefulsets/scale
verbs:
- get
- list
@ -619,6 +622,103 @@ items:
- certificatesigningrequests/selfnodeclient
verbs:
- create
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:csi-external-attacher
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- get
- list
- patch
- update
- watch
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:csi-external-provisioner
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- get
- list
- patch
- update
- watch
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@ -999,6 +1099,7 @@ items:
resources:
- pods/status
verbs:
- patch
- update
- apiGroups:
- ""
@ -1013,6 +1114,8 @@ items:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
@ -1035,6 +1138,14 @@ items:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- get
- patch
- update
- apiGroups:
- storage.k8s.io
resources:


@ -136,6 +136,23 @@ items:
- kind: ServiceAccount
name: endpoint-controller
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:controller:expand-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:controller:expand-controller
subjects:
- kind: ServiceAccount
name: expand-controller
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:


@ -239,6 +239,7 @@ items:
verbs:
- create
- delete
- get
- list
- patch
- update
@ -424,6 +425,70 @@ items:
- create
- patch
- update
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:controller:expand-controller
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- patch
- update
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
- services
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:


@ -18,18 +18,18 @@ limitations under the License.
package rbac
import (
"bytes"
"fmt"
"github.com/golang/glog"
"bytes"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbaclisters "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion"
rbaclisters "k8s.io/client-go/listers/rbac/v1"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)
@ -38,12 +38,12 @@ type RequestToRuleMapper interface {
// Any rule returned is still valid, since rules are deny by default. If you can pass with the rules
// supplied, you do not have to fail the request. If you cannot, you should indicate the error along
// with your denial.
RulesFor(subject user.Info, namespace string) ([]rbac.PolicyRule, error)
RulesFor(subject user.Info, namespace string) ([]rbacv1.PolicyRule, error)
// VisitRulesFor invokes visitor() with each rule that applies to a given user in a given namespace,
// and each error encountered resolving those rules. Rule may be nil if err is non-nil.
// If visitor() returns false, visiting is short-circuited.
VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool)
VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool)
}
type RBACAuthorizer struct {
@ -59,10 +59,10 @@ type authorizingVisitor struct {
errors []error
}
func (v *authorizingVisitor) visit(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool {
func (v *authorizingVisitor) visit(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool {
if rule != nil && RuleAllows(v.requestAttributes, rule) {
v.allowed = true
v.reason = fmt.Sprintf("allowed by %s", source.String())
v.reason = fmt.Sprintf("RBAC: allowed by %s", source.String())
return false
}
if err != nil {
@ -120,7 +120,7 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (aut
reason := ""
if len(ruleCheckingVisitor.errors) > 0 {
reason = fmt.Sprintf("%v", utilerrors.NewAggregate(ruleCheckingVisitor.errors))
reason = fmt.Sprintf("RBAC: %v", utilerrors.NewAggregate(ruleCheckingVisitor.errors))
}
return authorizer.DecisionNoOpinion, reason, nil
}
@ -164,7 +164,7 @@ func New(roles rbacregistryvalidation.RoleGetter, roleBindings rbacregistryvalid
return authorizer
}
func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRule) bool {
func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbacv1.PolicyRule) bool {
for i := range rules {
if RuleAllows(requestAttributes, &rules[i]) {
return true
@ -174,28 +174,28 @@ func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRul
return false
}
func RuleAllows(requestAttributes authorizer.Attributes, rule *rbac.PolicyRule) bool {
func RuleAllows(requestAttributes authorizer.Attributes, rule *rbacv1.PolicyRule) bool {
if requestAttributes.IsResourceRequest() {
combinedResource := requestAttributes.GetResource()
if len(requestAttributes.GetSubresource()) > 0 {
combinedResource = requestAttributes.GetResource() + "/" + requestAttributes.GetSubresource()
}
return rbac.VerbMatches(rule, requestAttributes.GetVerb()) &&
rbac.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) &&
rbac.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) &&
rbac.ResourceNameMatches(rule, requestAttributes.GetName())
return rbacv1helpers.VerbMatches(rule, requestAttributes.GetVerb()) &&
rbacv1helpers.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) &&
rbacv1helpers.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) &&
rbacv1helpers.ResourceNameMatches(rule, requestAttributes.GetName())
}
return rbac.VerbMatches(rule, requestAttributes.GetVerb()) &&
rbac.NonResourceURLMatches(rule, requestAttributes.GetPath())
return rbacv1helpers.VerbMatches(rule, requestAttributes.GetVerb()) &&
rbacv1helpers.NonResourceURLMatches(rule, requestAttributes.GetPath())
}
type RoleGetter struct {
Lister rbaclisters.RoleLister
}
func (g *RoleGetter) GetRole(namespace, name string) (*rbac.Role, error) {
func (g *RoleGetter) GetRole(namespace, name string) (*rbacv1.Role, error) {
return g.Lister.Roles(namespace).Get(name)
}
@ -203,7 +203,7 @@ type RoleBindingLister struct {
Lister rbaclisters.RoleBindingLister
}
func (l *RoleBindingLister) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) {
func (l *RoleBindingLister) ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) {
return l.Lister.RoleBindings(namespace).List(labels.Everything())
}
@ -211,7 +211,7 @@ type ClusterRoleGetter struct {
Lister rbaclisters.ClusterRoleLister
}
func (g *ClusterRoleGetter) GetClusterRole(name string) (*rbac.ClusterRole, error) {
func (g *ClusterRoleGetter) GetClusterRole(name string) (*rbacv1.ClusterRole, error) {
return g.Lister.Get(name)
}
@ -219,6 +219,6 @@ type ClusterRoleBindingLister struct {
Lister rbaclisters.ClusterRoleBindingLister
}
func (l *ClusterRoleBindingLister) ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) {
func (l *ClusterRoleBindingLister) ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) {
return l.Lister.List(labels.Everything())
}
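
A small standalone sketch of the matching semantics implemented by RuleAllows above, evaluated with the same rbacv1helpers matchers (the user, verb, and resource values are illustrative):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)

func main() {
	rule := rbacv1helpers.NewRule("get", "list").Groups("").Resources("pods").RuleOrDie()
	attrs := authorizer.AttributesRecord{
		User:            &user.DefaultInfo{Name: "alice"},
		Verb:            "get",
		Resource:        "pods",
		Namespace:       "ns1",
		ResourceRequest: true,
	}
	// Mirrors RuleAllows for a resource request with no subresource:
	// verb, API group, resource, and resource name must all match.
	allowed := rbacv1helpers.VerbMatches(&rule, attrs.GetVerb()) &&
		rbacv1helpers.APIGroupMatches(&rule, attrs.GetAPIGroup()) &&
		rbacv1helpers.ResourceMatches(&rule, attrs.GetResource(), attrs.GetSubresource()) &&
		rbacv1helpers.ResourceNameMatches(&rule, attrs.GetName())
	fmt.Println(allowed) // true
}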


@ -21,16 +21,17 @@ import (
"strings"
"testing"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)
func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRule {
return rbac.PolicyRule{
func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbacv1.PolicyRule {
return rbacv1.PolicyRule{
Verbs: strings.Split(verbs, ","),
APIGroups: strings.Split(apiGroups, ","),
Resources: strings.Split(resources, ","),
@ -38,12 +39,12 @@ func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRul
}
}
func newRole(name, namespace string, rules ...rbac.PolicyRule) *rbac.Role {
return &rbac.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules}
func newRole(name, namespace string, rules ...rbacv1.PolicyRule) *rbacv1.Role {
return &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules}
}
func newClusterRole(name string, rules ...rbac.PolicyRule) *rbac.ClusterRole {
return &rbac.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules}
func newClusterRole(name string, rules ...rbacv1.PolicyRule) *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules}
}
const (
@ -51,26 +52,26 @@ const (
bindToClusterRole uint16 = 0x1
)
func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRoleBinding {
r := &rbac.ClusterRoleBinding{
func newClusterRoleBinding(roleName string, subjects ...string) *rbacv1.ClusterRoleBinding {
r := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole", // ClusterRoleBindings can only refer to ClusterRole
Name: roleName,
},
}
r.Subjects = make([]rbac.Subject, len(subjects))
r.Subjects = make([]rbacv1.Subject, len(subjects))
for i, subject := range subjects {
split := strings.SplitN(subject, ":", 2)
r.Subjects[i].Kind, r.Subjects[i].Name = split[0], split[1]
switch r.Subjects[i].Kind {
case rbac.ServiceAccountKind:
case rbacv1.ServiceAccountKind:
r.Subjects[i].APIGroup = ""
case rbac.UserKind, rbac.GroupKind:
r.Subjects[i].APIGroup = rbac.GroupName
case rbacv1.UserKind, rbacv1.GroupKind:
r.Subjects[i].APIGroup = rbacv1.GroupName
default:
panic(fmt.Errorf("invalid kind %s", r.Subjects[i].Kind))
}
@ -78,26 +79,26 @@ func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRol
return r
}
func newRoleBinding(namespace, roleName string, bindType uint16, subjects ...string) *rbac.RoleBinding {
r := &rbac.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}}
func newRoleBinding(namespace, roleName string, bindType uint16, subjects ...string) *rbacv1.RoleBinding {
r := &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}}
switch bindType {
case bindToRole:
r.RoleRef = rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: roleName}
r.RoleRef = rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "Role", Name: roleName}
case bindToClusterRole:
r.RoleRef = rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: roleName}
r.RoleRef = rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: roleName}
}
r.Subjects = make([]rbac.Subject, len(subjects))
r.Subjects = make([]rbacv1.Subject, len(subjects))
for i, subject := range subjects {
split := strings.SplitN(subject, ":", 2)
r.Subjects[i].Kind, r.Subjects[i].Name = split[0], split[1]
switch r.Subjects[i].Kind {
case rbac.ServiceAccountKind:
case rbacv1.ServiceAccountKind:
r.Subjects[i].APIGroup = ""
case rbac.UserKind, rbac.GroupKind:
r.Subjects[i].APIGroup = rbac.GroupName
case rbacv1.UserKind, rbacv1.GroupKind:
r.Subjects[i].APIGroup = rbacv1.GroupName
default:
panic(fmt.Errorf("invalid kind %s", r.Subjects[i].Kind))
}
@ -136,19 +137,19 @@ func (d *defaultAttributes) GetPath() string { return "" }
func TestAuthorizer(t *testing.T) {
tests := []struct {
roles []*rbac.Role
roleBindings []*rbac.RoleBinding
clusterRoles []*rbac.ClusterRole
clusterRoleBindings []*rbac.ClusterRoleBinding
roles []*rbacv1.Role
roleBindings []*rbacv1.RoleBinding
clusterRoles []*rbacv1.ClusterRole
clusterRoleBindings []*rbacv1.ClusterRoleBinding
shouldPass []authorizer.Attributes
shouldFail []authorizer.Attributes
}{
{
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "*", "*")),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
shouldPass: []authorizer.Attributes{
@ -167,12 +168,12 @@ func TestAuthorizer(t *testing.T) {
},
{
// Non-resource-url tests
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("non-resource-url-getter", newRule("get", "", "", "/apis")),
newClusterRole("non-resource-url", newRule("*", "", "", "/apis")),
newClusterRole("non-resource-url-prefix", newRule("get", "", "", "/apis/*")),
},
clusterRoleBindings: []*rbac.ClusterRoleBinding{
clusterRoleBindings: []*rbacv1.ClusterRoleBinding{
newClusterRoleBinding("non-resource-url-getter", "User:foo", "Group:bar"),
newClusterRoleBinding("non-resource-url", "User:admin", "Group:admin"),
newClusterRoleBinding("non-resource-url-prefix", "User:prefixed", "Group:prefixed"),
@ -208,10 +209,10 @@ func TestAuthorizer(t *testing.T) {
},
{
// test subresource resolution
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "pods", "*")),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
shouldPass: []authorizer.Attributes{
@ -223,13 +224,13 @@ func TestAuthorizer(t *testing.T) {
},
{
// test subresource resolution
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin",
newRule("*", "*", "pods/status", "*"),
newRule("*", "*", "*/scale", "*"),
),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
shouldPass: []authorizer.Attributes{
@ -263,13 +264,13 @@ func TestAuthorizer(t *testing.T) {
func TestRuleMatches(t *testing.T) {
tests := []struct {
name string
rule rbac.PolicyRule
rule rbacv1.PolicyRule
requestsToExpected map[authorizer.AttributesRecord]bool
}{
{
name: "star verb, exact match other",
rule: rbac.NewRule("*").Groups("group1").Resources("resource1").RuleOrDie(),
rule: rbacv1helpers.NewRule("*").Groups("group1").Resources("resource1").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): false,
@ -283,7 +284,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star group, exact match other",
rule: rbac.NewRule("verb1").Groups("*").Resources("resource1").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").Groups("*").Resources("resource1").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): true,
@ -297,7 +298,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star resource, exact match other",
rule: rbac.NewRule("verb1").Groups("group1").Resources("*").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").Groups("group1").Resources("*").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): false,
@ -311,7 +312,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "tuple expansion",
rule: rbac.NewRule("verb1", "verb2").Groups("group1", "group2").Resources("resource1", "resource2").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1", "verb2").Groups("group1", "group2").Resources("resource1", "resource2").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): true,
@ -325,7 +326,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "subresource expansion",
rule: rbac.NewRule("*").Groups("*").Resources("resource1/subresource1").RuleOrDie(),
rule: rbacv1helpers.NewRule("*").Groups("*").Resources("resource1/subresource1").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").Subresource("subresource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").Subresource("subresource2").New(): false,
@ -339,7 +340,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star nonresource, exact match other",
rule: rbac.NewRule("verb1").URLs("*").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").URLs("*").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
nonresourceRequest("verb1").URL("/foo").New(): true,
nonresourceRequest("verb1").URL("/foo/bar").New(): true,
@ -355,7 +356,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star nonresource subpath",
rule: rbac.NewRule("verb1").URLs("/foo/*").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").URLs("/foo/*").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
nonresourceRequest("verb1").URL("/foo").New(): false,
nonresourceRequest("verb1").URL("/foo/bar").New(): true,
@ -371,7 +372,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star verb, exact nonresource",
rule: rbac.NewRule("*").URLs("/foo", "/foo/bar/one").RuleOrDie(),
rule: rbacv1helpers.NewRule("*").URLs("/foo", "/foo/bar/one").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
nonresourceRequest("verb1").URL("/foo").New(): true,
nonresourceRequest("verb1").URL("/foo/bar").New(): false,
@ -441,19 +442,19 @@ func (r *requestAttributeBuilder) New() authorizer.AttributesRecord {
}
func BenchmarkAuthorize(b *testing.B) {
bootstrapRoles := []rbac.ClusterRole{}
bootstrapRoles := []rbacv1.ClusterRole{}
bootstrapRoles = append(bootstrapRoles, bootstrappolicy.ControllerRoles()...)
bootstrapRoles = append(bootstrapRoles, bootstrappolicy.ClusterRoles()...)
bootstrapBindings := []rbac.ClusterRoleBinding{}
bootstrapBindings := []rbacv1.ClusterRoleBinding{}
bootstrapBindings = append(bootstrapBindings, bootstrappolicy.ClusterRoleBindings()...)
bootstrapBindings = append(bootstrapBindings, bootstrappolicy.ControllerRoleBindings()...)
clusterRoles := []*rbac.ClusterRole{}
clusterRoles := []*rbacv1.ClusterRole{}
for i := range bootstrapRoles {
clusterRoles = append(clusterRoles, &bootstrapRoles[i])
}
clusterRoleBindings := []*rbac.ClusterRoleBinding{}
clusterRoleBindings := []*rbacv1.ClusterRoleBinding{}
for i := range bootstrapBindings {
clusterRoleBindings = append(clusterRoleBindings, &bootstrapBindings[i])
}
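
As a reading aid for the table-driven tests above: the comma-splitting newRule helper defined earlier in this test file is equivalent to filling a rbacv1.PolicyRule literal, e.g. (sketch):

package main

import (
	"fmt"
	"strings"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// newRule("get,watch", "", "pods,services", "") from the test file above
	// is equivalent to this literal.
	rule := rbacv1.PolicyRule{
		Verbs:           strings.Split("get,watch", ","),
		APIGroups:       strings.Split("", ","), // [""], i.e. the core ("legacy") API group
		Resources:       strings.Split("pods,services", ","),
		NonResourceURLs: strings.Split("", ","),
	}
	fmt.Printf("%#v\n", rule)
}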


@ -18,21 +18,21 @@ limitations under the License.
package rbac
import (
rbacv1 "k8s.io/api/rbac/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)
type RoleToRuleMapper interface {
// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namespace
// of the role binding, or the empty string for a cluster role binding.
GetRoleReferenceRules(roleRef rbac.RoleRef, namespace string) ([]rbac.PolicyRule, error)
GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error)
}
type SubjectLocator interface {
AllowedSubjects(attributes authorizer.Attributes) ([]rbac.Subject, error)
AllowedSubjects(attributes authorizer.Attributes) ([]rbacv1.Subject, error)
}
var _ = SubjectLocator(&SubjectAccessEvaluator{})
@ -59,10 +59,10 @@ func NewSubjectAccessEvaluator(roles rbacregistryvalidation.RoleGetter, roleBind
// AllowedSubjects returns the subjects that can perform an action and any errors encountered while computing the list.
// It is possible to have both subjects and errors returned if some rolebindings couldn't be resolved, but others could be.
func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.Attributes) ([]rbac.Subject, error) {
subjects := []rbac.Subject{{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}}
func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.Attributes) ([]rbacv1.Subject, error) {
subjects := []rbacv1.Subject{{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}}
if len(r.superUser) > 0 {
subjects = append(subjects, rbac.Subject{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: r.superUser})
subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: r.superUser})
}
errorlist := []error{}
@ -104,7 +104,7 @@ func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.At
}
}
dedupedSubjects := []rbac.Subject{}
dedupedSubjects := []rbacv1.Subject{}
for _, subject := range subjects {
found := false
for _, curr := range dedupedSubjects {
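
The hunk ends inside the de-duplication loop; the pattern is a plain quadratic dedup over comparable rbacv1.Subject values. A minimal standalone sketch of the same idea:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

// dedup keeps the first occurrence of each subject, preserving order.
func dedup(subjects []rbacv1.Subject) []rbacv1.Subject {
	out := []rbacv1.Subject{}
	for _, s := range subjects {
		found := false
		for _, c := range out {
			if c == s { // Subject has only string fields, so == is valid
				found = true
				break
			}
		}
		if !found {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	subs := []rbacv1.Subject{
		{Kind: rbacv1.UserKind, Name: "alice"},
		{Kind: rbacv1.UserKind, Name: "alice"},
	}
	fmt.Println(len(dedup(subs))) // 1
}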


@ -20,24 +20,24 @@ import (
"reflect"
"testing"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)
func TestSubjectLocator(t *testing.T) {
type actionToSubjects struct {
action authorizer.Attributes
subjects []rbac.Subject
subjects []rbacv1.Subject
}
tests := []struct {
name string
roles []*rbac.Role
roleBindings []*rbac.RoleBinding
clusterRoles []*rbac.ClusterRole
clusterRoleBindings []*rbac.ClusterRoleBinding
roles []*rbacv1.Role
roleBindings []*rbacv1.RoleBinding
clusterRoles []*rbacv1.ClusterRole
clusterRoleBindings []*rbacv1.ClusterRoleBinding
superUser string
@ -45,42 +45,42 @@ func TestSubjectLocator(t *testing.T) {
}{
{
name: "no super user, star matches star",
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "*", "*")),
},
clusterRoleBindings: []*rbac.ClusterRoleBinding{
clusterRoleBindings: []*rbacv1.ClusterRoleBinding{
newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
actionsToSubjects: []actionToSubjects{
{
&defaultAttributes{"", "", "get", "Pods", "", "ns1", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"},
},
},
{
// cluster role matches star in namespace
&defaultAttributes{"", "", "*", "Pods", "", "*", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
{
// empty ns
&defaultAttributes{"", "", "*", "Pods", "", "", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
},
@ -88,48 +88,48 @@ func TestSubjectLocator(t *testing.T) {
{
name: "super user, local roles work",
superUser: "foo",
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "*", "*")),
},
clusterRoleBindings: []*rbac.ClusterRoleBinding{
clusterRoleBindings: []*rbacv1.ClusterRoleBinding{
newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"),
},
roles: []*rbac.Role{
roles: []*rbacv1.Role{
newRole("admin", "ns1", newRule("get", "*", "Pods", "*")),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToRole, "User:admin", "Group:admins"),
},
actionsToSubjects: []actionToSubjects{
{
&defaultAttributes{"", "", "get", "Pods", "", "ns1", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"},
},
},
{
// verb matches correctly
&defaultAttributes{"", "", "create", "Pods", "", "ns1", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
{
// binding only works in correct ns
&defaultAttributes{"", "", "get", "Pods", "", "ns2", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
},