Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/plugin/BUILD (generated, vendored): 5 changes
@@ -21,13 +21,10 @@ filegroup(
        "//plugin/pkg/admission/extendedresourcetoleration:all-srcs",
        "//plugin/pkg/admission/gc:all-srcs",
        "//plugin/pkg/admission/imagepolicy:all-srcs",
-       "//plugin/pkg/admission/initialresources:all-srcs",
        "//plugin/pkg/admission/limitranger:all-srcs",
        "//plugin/pkg/admission/namespace/autoprovision:all-srcs",
        "//plugin/pkg/admission/namespace/exists:all-srcs",
        "//plugin/pkg/admission/noderestriction:all-srcs",
-       "//plugin/pkg/admission/persistentvolume/label:all-srcs",
-       "//plugin/pkg/admission/persistentvolume/resize:all-srcs",
        "//plugin/pkg/admission/podnodeselector:all-srcs",
        "//plugin/pkg/admission/podpreset:all-srcs",
        "//plugin/pkg/admission/podtolerationrestriction:all-srcs",
@@ -36,6 +33,8 @@ filegroup(
        "//plugin/pkg/admission/security:all-srcs",
        "//plugin/pkg/admission/securitycontext/scdeny:all-srcs",
        "//plugin/pkg/admission/serviceaccount:all-srcs",
+       "//plugin/pkg/admission/storage/persistentvolume/label:all-srcs",
+       "//plugin/pkg/admission/storage/persistentvolume/resize:all-srcs",
        "//plugin/pkg/admission/storage/storageclass/setdefault:all-srcs",
        "//plugin/pkg/admission/storage/storageobjectinuseprotection:all-srcs",
        "//plugin/pkg/auth:all-srcs",
vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/BUILD (generated, vendored): 1 change
@@ -24,7 +24,6 @@ go_library(
    importpath = "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/helper:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
vendor/k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds/admission.go (generated, vendored): 35 changes
@@ -24,7 +24,6 @@ import (
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apiserver/pkg/admission"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
)

@@ -39,6 +38,20 @@ var (
	defaultUnreachableTolerationSeconds = flag.Int64("default-unreachable-toleration-seconds", 300,
		"Indicates the tolerationSeconds of the toleration for unreachable:NoExecute"+
			" that is added by default to every pod that does not already have such a toleration.")
+
+	notReadyToleration = api.Toleration{
+		Key:               algorithm.TaintNodeNotReady,
+		Operator:          api.TolerationOpExists,
+		Effect:            api.TaintEffectNoExecute,
+		TolerationSeconds: defaultNotReadyTolerationSeconds,
+	}
+
+	unreachableToleration = api.Toleration{
+		Key:               algorithm.TaintNodeUnreachable,
+		Operator:          api.TolerationOpExists,
+		Effect:            api.TaintEffectNoExecute,
+		TolerationSeconds: defaultUnreachableTolerationSeconds,
+	}
)

// Register registers a plugin
@@ -99,27 +112,13 @@ func (p *Plugin) Admit(attributes admission.Attributes) (err error) {
		}
	}

	// no change is required, return immediately
	if toleratesNodeNotReady && toleratesNodeUnreachable {
		return nil
	}

	if !toleratesNodeNotReady {
-		helper.AddOrUpdateTolerationInPod(pod, &api.Toleration{
-			Key:               algorithm.TaintNodeNotReady,
-			Operator:          api.TolerationOpExists,
-			Effect:            api.TaintEffectNoExecute,
-			TolerationSeconds: defaultNotReadyTolerationSeconds,
-		})
+		pod.Spec.Tolerations = append(pod.Spec.Tolerations, notReadyToleration)
	}

	if !toleratesNodeUnreachable {
-		helper.AddOrUpdateTolerationInPod(pod, &api.Toleration{
-			Key:               algorithm.TaintNodeUnreachable,
-			Operator:          api.TolerationOpExists,
-			Effect:            api.TaintEffectNoExecute,
-			TolerationSeconds: defaultUnreachableTolerationSeconds,
-		})
+		pod.Spec.Tolerations = append(pod.Spec.Tolerations, unreachableToleration)
	}

	return nil
}
vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/BUILD (generated, vendored): 2 changes
@@ -45,8 +45,6 @@ go_library(
        "//plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation:go_default_library",
        "//vendor/github.com/hashicorp/golang-lru:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-       "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
-       "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/admission:go_default_library",

@@ -12,9 +12,8 @@ go_library(
    deps = [
        "//plugin/pkg/admission/eventratelimit/apis/eventratelimit:go_default_library",
        "//plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1:go_default_library",
-       "//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
-       "//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+       "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
    ],
)
@@ -19,25 +19,15 @@ limitations under the License.
package install

import (
-	"k8s.io/apimachinery/pkg/apimachinery/announced"
-	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	internalapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit"
	versionedapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1"
)

// Install registers the API group and adds types to a scheme
-func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
-	if err := announced.NewGroupMetaFactory(
-		&announced.GroupMetaFactoryArgs{
-			GroupName:                  internalapi.GroupName,
-			VersionPreferenceOrder:     []string{versionedapi.SchemeGroupVersion.Version},
-			AddInternalObjectsToScheme: internalapi.AddToScheme,
-		},
-		announced.VersionToSchemeFunc{
-			versionedapi.SchemeGroupVersion.Version: versionedapi.AddToScheme,
-		},
-	).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
-		panic(err)
-	}
+func Install(scheme *runtime.Scheme) {
+	utilruntime.Must(internalapi.AddToScheme(scheme))
+	utilruntime.Must(versionedapi.AddToScheme(scheme))
+	utilruntime.Must(scheme.SetVersionPriority(versionedapi.SchemeGroupVersion))
}
@@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2018 The Kubernetes Authors.
+Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
vendor/k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/config.go (generated, vendored): 11 changes
@@ -20,10 +20,7 @@ import (
	"fmt"
	"io"
	"io/ioutil"
-	"os"

-	"k8s.io/apimachinery/pkg/apimachinery/announced"
-	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	eventratelimitapi "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit"
@@ -32,14 +29,12 @@
)

var (
-	groupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
-	registry             = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
-	scheme               = runtime.NewScheme()
-	codecs               = serializer.NewCodecFactory(scheme)
+	scheme = runtime.NewScheme()
+	codecs = serializer.NewCodecFactory(scheme)
)

func init() {
-	install.Install(groupFactoryRegistry, registry, scheme)
+	install.Install(scheme)
}

// LoadConfiguration loads the provided configuration.
vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/BUILD (generated, vendored): 1 change
@@ -30,6 +30,7 @@ go_test(
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/kubeapiserver/admission:go_default_library",
+       "//vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission.go (generated, vendored): 43 changes
@@ -95,21 +95,26 @@ func (a *gcPermissionsEnforcement) Validate(attributes admission.Attributes) (er
		return nil
	}

-	deleteAttributes := authorizer.AttributesRecord{
-		User:            attributes.GetUserInfo(),
-		Verb:            "delete",
-		Namespace:       attributes.GetNamespace(),
-		APIGroup:        attributes.GetResource().Group,
-		APIVersion:      attributes.GetResource().Version,
-		Resource:        attributes.GetResource().Resource,
-		Subresource:     attributes.GetSubresource(),
-		Name:            attributes.GetName(),
-		ResourceRequest: true,
-		Path:            "",
-	}
-	decision, reason, err := a.authorizer.Authorize(deleteAttributes)
-	if decision != authorizer.DecisionAllow {
-		return admission.NewForbidden(attributes, fmt.Errorf("cannot set an ownerRef on a resource you can't delete: %v, %v", reason, err))
+	// if you are creating a thing, you should always be allowed to set an owner ref since you logically had the power
+	// to never create it. We still need to check block owner deletion below, because the power to delete does not
+	// imply the power to prevent deletion on other resources.
+	if attributes.GetOperation() != admission.Create {
+		deleteAttributes := authorizer.AttributesRecord{
+			User:            attributes.GetUserInfo(),
+			Verb:            "delete",
+			Namespace:       attributes.GetNamespace(),
+			APIGroup:        attributes.GetResource().Group,
+			APIVersion:      attributes.GetResource().Version,
+			Resource:        attributes.GetResource().Resource,
+			Subresource:     attributes.GetSubresource(),
+			Name:            attributes.GetName(),
+			ResourceRequest: true,
+			Path:            "",
+		}
+		decision, reason, err := a.authorizer.Authorize(deleteAttributes)
+		if decision != authorizer.DecisionAllow {
+			return admission.NewForbidden(attributes, fmt.Errorf("cannot set an ownerRef on a resource you can't delete: %v, %v", reason, err))
+		}
	}

	// Further check if the user is setting ownerReference.blockOwnerDeletion to
@@ -119,7 +124,7 @@ func (a *gcPermissionsEnforcement) Validate(attributes admission.Attributes) (er
	for _, ref := range newBlockingRefs {
		records, err := a.ownerRefToDeleteAttributeRecords(ref, attributes)
		if err != nil {
-			return admission.NewForbidden(attributes, fmt.Errorf("cannot set blockOwnerDeletion in this case because cannot find RESTMapping for APIVersion %s Kind %s: %v, %v", ref.APIVersion, ref.Kind, reason, err))
+			return admission.NewForbidden(attributes, fmt.Errorf("cannot set blockOwnerDeletion in this case because cannot find RESTMapping for APIVersion %s Kind %s: %v", ref.APIVersion, ref.Kind, err))
		}
		// Multiple records are returned if ref.Kind could map to multiple
		// resources. User needs to have delete permission on all the
@@ -186,9 +191,9 @@ func (a *gcPermissionsEnforcement) ownerRefToDeleteAttributeRecords(ref metav1.O
		Verb: "update",
		// ownerReference can only refer to an object in the same namespace, so attributes.GetNamespace() equals to the owner's namespace
		Namespace: attributes.GetNamespace(),
-		APIGroup:   groupVersion.Group,
-		APIVersion: groupVersion.Version,
-		Resource:   mapping.Resource,
+		APIGroup:   mapping.Resource.Group,
+		APIVersion: mapping.Resource.Version,
+		Resource:   mapping.Resource.Resource,
		Subresource: "finalizers",
		Name:        ref.Name,
		ResourceRequest: true,
vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission_test.go (generated, vendored): 46 changes
@@ -20,6 +20,7 @@ import (
	"strings"
	"testing"

+	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -87,7 +88,7 @@ func newGCPermissionsEnforcement() (*gcPermissionsEnforcement, error) {
	}

	genericPluginInitializer := initializer.New(nil, nil, fakeAuthorizer{}, nil)
-	pluginInitializer := kubeadmission.NewPluginInitializer(nil, nil, nil, legacyscheme.Registry.RESTMapper(), nil)
+	pluginInitializer := kubeadmission.NewPluginInitializer(nil, nil, nil, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme), nil)
	initializersChain := admission.PluginInitializers{}
	initializersChain = append(initializersChain, genericPluginInitializer)
	initializersChain = append(initializersChain, pluginInitializer)
@@ -101,6 +102,9 @@ func TestGCAdmission(t *testing.T) {
		return err == nil
	}
	expectCantSetOwnerRefError := func(err error) bool {
+		if err == nil {
+			return false
+		}
		return strings.Contains(err.Error(), "cannot set an ownerRef on a resource you can't delete")
	}
	tests := []struct {
@@ -139,7 +143,7 @@ func TestGCAdmission(t *testing.T) {
			username: "non-deleter",
			resource: api.SchemeGroupVersion.WithResource("pods"),
			newObj:   &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
-			checkError: expectCantSetOwnerRefError,
+			checkError: expectNoError,
		},
		{
			name:     "non-pod-deleter, create, no objectref change",
@@ -153,7 +157,7 @@ func TestGCAdmission(t *testing.T) {
			username: "non-pod-deleter",
			resource: api.SchemeGroupVersion.WithResource("pods"),
			newObj:   &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
-			checkError: expectCantSetOwnerRefError,
+			checkError: expectNoError,
		},
		{
			name:     "non-pod-deleter, create, objectref change, but not a pod",
@@ -253,32 +257,34 @@ func TestGCAdmission(t *testing.T) {
			checkError: expectNoError,
		},
	}
-	gcAdmit, err := newGCPermissionsEnforcement()
-	if err != nil {
-		t.Error(err)
-	}

	for _, tc := range tests {
-		operation := admission.Create
-		if tc.oldObj != nil {
-			operation = admission.Update
-		}
-		user := &user.DefaultInfo{Name: tc.username}
-		attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, user)
-
-		err := gcAdmit.Validate(attributes)
-		if !tc.checkError(err) {
-			t.Errorf("%v: unexpected err: %v", tc.name, err)
-		}
+		t.Run(tc.name, func(t *testing.T) {
+			gcAdmit, err := newGCPermissionsEnforcement()
+			if err != nil {
+				t.Error(err)
+			}
+
+			operation := admission.Create
+			if tc.oldObj != nil {
+				operation = admission.Update
+			}
+			user := &user.DefaultInfo{Name: tc.username}
+			attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, tc.subresource, operation, user)
+
+			err = gcAdmit.Validate(attributes)
+			if !tc.checkError(err) {
+				t.Errorf("unexpected err: %v", err)
+			}
+		})
	}
}

func TestBlockOwnerDeletionAdmission(t *testing.T) {
	podWithOwnerRefs := func(refs ...metav1.OwnerReference) *api.Pod {
		var refSlice []metav1.OwnerReference
-		for _, ref := range refs {
-			refSlice = append(refSlice, ref)
-		}
+		refSlice = append(refSlice, refs...)

		return &api.Pod{
			ObjectMeta: metav1.ObjectMeta{
				OwnerReferences: refSlice,
vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission.go (generated, vendored): 2 changes
@@ -239,7 +239,7 @@ func NewImagePolicyWebhook(configFile io.Reader) (*Plugin, error) {
		return nil, err
	}

-	gw, err := webhook.NewGenericWebhook(legacyscheme.Registry, legacyscheme.Codecs, whConfig.KubeConfigFile, groupVersions, whConfig.RetryBackoff)
+	gw, err := webhook.NewGenericWebhook(legacyscheme.Scheme, legacyscheme.Codecs, whConfig.KubeConfigFile, groupVersions, whConfig.RetryBackoff)
	if err != nil {
		return nil, err
	}
vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/gencerts.sh (generated, vendored): 2 changes
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/BUILD (generated, vendored): 70 changes
@@ -1,70 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"admission.go",
|
||||
"data_source.go",
|
||||
"gcm.go",
|
||||
"hawkular.go",
|
||||
"influxdb.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/initialresources",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//vendor/cloud.google.com/go/compute/metadata:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/hawkular/hawkular-client-go/metrics:go_default_library",
|
||||
"//vendor/github.com/influxdata/influxdb/client:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
"//vendor/google.golang.org/api/cloudmonitoring/v2beta2:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"admission_test.go",
|
||||
"data_source_test.go",
|
||||
"gcm_test.go",
|
||||
"hawkular_test.go",
|
||||
"influxdb_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2:go_default_library",
|
||||
"//vendor/golang.org/x/oauth2/google:go_default_library",
|
||||
"//vendor/google.golang.org/api/cloudmonitoring/v2beta2:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission.go (generated, vendored): 220 changes
@@ -1,220 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initialresources
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
)
|
||||
|
||||
var (
|
||||
source = flag.String("ir-data-source", "influxdb", "Data source used by InitialResources. Supported options: influxdb, gcm.")
|
||||
percentile = flag.Int64("ir-percentile", 90, "Which percentile of samples should InitialResources use when estimating resources. For experiment purposes.")
|
||||
nsOnly = flag.Bool("ir-namespace-only", false, "Whether the estimation should be made only based on data from the same namespace.")
|
||||
)
|
||||
|
||||
const (
|
||||
initialResourcesAnnotation = "kubernetes.io/initial-resources"
|
||||
samplesThreshold = 30
|
||||
week = 7 * 24 * time.Hour
|
||||
month = 30 * 24 * time.Hour
|
||||
// PluginName indicates name of admission plugin.
|
||||
PluginName = "InitialResources"
|
||||
)
|
||||
|
||||
// Register registers a plugin
|
||||
// WARNING: this feature is experimental and will definitely change.
|
||||
func Register(plugins *admission.Plugins) {
|
||||
plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) {
|
||||
// TODO: remove the usage of flags in favor of reading versioned configuration
|
||||
s, err := newDataSource(*source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newInitialResources(s, *percentile, *nsOnly), nil
|
||||
})
|
||||
}
|
||||
|
||||
type InitialResources struct {
|
||||
*admission.Handler
|
||||
source dataSource
|
||||
percentile int64
|
||||
nsOnly bool
|
||||
}
|
||||
|
||||
var _ admission.MutationInterface = &InitialResources{}
|
||||
|
||||
func newInitialResources(source dataSource, percentile int64, nsOnly bool) *InitialResources {
|
||||
return &InitialResources{
|
||||
Handler: admission.NewHandler(admission.Create),
|
||||
source: source,
|
||||
percentile: percentile,
|
||||
nsOnly: nsOnly,
|
||||
}
|
||||
}
|
||||
|
||||
// Admit makes an admission decision based on the request attributes
|
||||
func (ir InitialResources) Admit(a admission.Attributes) (err error) {
|
||||
// Ignore all calls to subresources or resources other than pods.
|
||||
if a.GetSubresource() != "" || a.GetResource().GroupResource() != api.Resource("pods") {
|
||||
return nil
|
||||
}
|
||||
pod, ok := a.GetObject().(*api.Pod)
|
||||
if !ok {
|
||||
return apierrors.NewBadRequest("Resource was marked with kind Pod but was unable to be converted")
|
||||
}
|
||||
|
||||
ir.estimateAndFillResourcesIfNotSet(pod)
|
||||
return nil
|
||||
}
|
||||
|
||||
// The method veryfies whether resources should be set for the given pod and
|
||||
// if there is estimation available the method fills Request field.
|
||||
func (ir InitialResources) estimateAndFillResourcesIfNotSet(pod *api.Pod) {
|
||||
var annotations []string
|
||||
for i := range pod.Spec.InitContainers {
|
||||
annotations = append(annotations, ir.estimateContainer(pod, &pod.Spec.InitContainers[i], "init container")...)
|
||||
}
|
||||
for i := range pod.Spec.Containers {
|
||||
annotations = append(annotations, ir.estimateContainer(pod, &pod.Spec.Containers[i], "container")...)
|
||||
}
|
||||
if len(annotations) > 0 {
|
||||
if pod.ObjectMeta.Annotations == nil {
|
||||
pod.ObjectMeta.Annotations = make(map[string]string)
|
||||
}
|
||||
val := "Initial Resources plugin set: " + strings.Join(annotations, "; ")
|
||||
pod.ObjectMeta.Annotations[initialResourcesAnnotation] = val
|
||||
}
|
||||
}
|
||||
|
||||
func (ir InitialResources) estimateContainer(pod *api.Pod, c *api.Container, message string) []string {
|
||||
var annotations []string
|
||||
req := c.Resources.Requests
|
||||
cpu := ir.getEstimationIfNeeded(api.ResourceCPU, c, pod.ObjectMeta.Namespace)
|
||||
mem := ir.getEstimationIfNeeded(api.ResourceMemory, c, pod.ObjectMeta.Namespace)
|
||||
// If Requests doesn't exits and an estimation was made, create Requests.
|
||||
if req == nil && (cpu != nil || mem != nil) {
|
||||
c.Resources.Requests = api.ResourceList{}
|
||||
req = c.Resources.Requests
|
||||
}
|
||||
setRes := []string{}
|
||||
if cpu != nil {
|
||||
glog.Infof("CPU estimation for %s %v in pod %v/%v is %v", message, c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, cpu.String())
|
||||
setRes = append(setRes, string(api.ResourceCPU))
|
||||
req[api.ResourceCPU] = *cpu
|
||||
}
|
||||
if mem != nil {
|
||||
glog.Infof("Memory estimation for %s %v in pod %v/%v is %v", message, c.Name, pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, mem.String())
|
||||
setRes = append(setRes, string(api.ResourceMemory))
|
||||
req[api.ResourceMemory] = *mem
|
||||
}
|
||||
if len(setRes) > 0 {
|
||||
sort.Strings(setRes)
|
||||
a := strings.Join(setRes, ", ") + fmt.Sprintf(" request for %s %s", message, c.Name)
|
||||
annotations = append(annotations, a)
|
||||
}
|
||||
return annotations
|
||||
}
|
||||
|
||||
// getEstimationIfNeeded estimates compute resource for container if its corresponding
|
||||
// Request(min amount) and Limit(max amount) both are not specified.
|
||||
func (ir InitialResources) getEstimationIfNeeded(kind api.ResourceName, c *api.Container, ns string) *resource.Quantity {
|
||||
requests := c.Resources.Requests
|
||||
limits := c.Resources.Limits
|
||||
var quantity *resource.Quantity
|
||||
var err error
|
||||
if _, requestFound := requests[kind]; !requestFound {
|
||||
if _, limitFound := limits[kind]; !limitFound {
|
||||
quantity, err = ir.getEstimation(kind, c, ns)
|
||||
if err != nil {
|
||||
glog.Errorf("Error while trying to estimate resources: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return quantity
|
||||
}
|
||||
func (ir InitialResources) getEstimation(kind api.ResourceName, c *api.Container, ns string) (*resource.Quantity, error) {
|
||||
end := time.Now()
|
||||
start := end.Add(-week)
|
||||
var usage, samples int64
|
||||
var err error
|
||||
|
||||
// Historical data from last 7 days for the same image:tag within the same namespace.
|
||||
if usage, samples, err = ir.source.GetUsagePercentile(kind, ir.percentile, c.Image, ns, true, start, end); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if samples < samplesThreshold {
|
||||
// Historical data from last 30 days for the same image:tag within the same namespace.
|
||||
start := end.Add(-month)
|
||||
if usage, samples, err = ir.source.GetUsagePercentile(kind, ir.percentile, c.Image, ns, true, start, end); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// If we are allowed to estimate only based on data from the same namespace.
|
||||
if ir.nsOnly {
|
||||
if samples < samplesThreshold {
|
||||
// Historical data from last 30 days for the same image within the same namespace.
|
||||
start := end.Add(-month)
|
||||
image := strings.Split(c.Image, ":")[0]
|
||||
if usage, samples, err = ir.source.GetUsagePercentile(kind, ir.percentile, image, ns, false, start, end); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if samples < samplesThreshold {
|
||||
// Historical data from last 7 days for the same image:tag within all namespaces.
|
||||
start := end.Add(-week)
|
||||
if usage, samples, err = ir.source.GetUsagePercentile(kind, ir.percentile, c.Image, "", true, start, end); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if samples < samplesThreshold {
|
||||
// Historical data from last 30 days for the same image:tag within all namespaces.
|
||||
start := end.Add(-month)
|
||||
if usage, samples, err = ir.source.GetUsagePercentile(kind, ir.percentile, c.Image, "", true, start, end); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if samples < samplesThreshold {
|
||||
// Historical data from last 30 days for the same image within all namespaces.
|
||||
start := end.Add(-month)
|
||||
image := strings.Split(c.Image, ":")[0]
|
||||
if usage, samples, err = ir.source.GetUsagePercentile(kind, ir.percentile, image, "", false, start, end); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if samples > 0 && kind == api.ResourceCPU {
|
||||
return resource.NewMilliQuantity(usage, resource.DecimalSI), nil
|
||||
}
|
||||
if samples > 0 && kind == api.ResourceMemory {
|
||||
return resource.NewQuantity(usage, resource.DecimalSI), nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission_test.go (generated, vendored): 300 changes
@@ -1,300 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initialresources
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
)
|
||||
|
||||
type fakeSource struct {
|
||||
f func(kind api.ResourceName, perc int64, image, namespace string, exactMatch bool, start, end time.Time) (int64, int64, error)
|
||||
}
|
||||
|
||||
func (s *fakeSource) GetUsagePercentile(kind api.ResourceName, perc int64, image, namespace string, exactMatch bool, start, end time.Time) (usage int64, samples int64, err error) {
|
||||
return s.f(kind, perc, image, namespace, exactMatch, start, end)
|
||||
}
|
||||
|
||||
func parseReq(cpu, mem string) api.ResourceList {
|
||||
if cpu == "" && mem == "" {
|
||||
return nil
|
||||
}
|
||||
req := api.ResourceList{}
|
||||
if cpu != "" {
|
||||
req[api.ResourceCPU] = resource.MustParse(cpu)
|
||||
}
|
||||
if mem != "" {
|
||||
req[api.ResourceMemory] = resource.MustParse(mem)
|
||||
}
|
||||
return req
|
||||
}
|
||||
|
||||
func addContainer(pod *api.Pod, name, image string, request api.ResourceList) {
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
|
||||
Name: name,
|
||||
Image: image,
|
||||
Resources: api.ResourceRequirements{Requests: request},
|
||||
})
|
||||
}
|
||||
|
||||
func createPod(name string, image string, request api.ResourceList) *api.Pod {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"},
|
||||
Spec: api.PodSpec{},
|
||||
}
|
||||
pod.Spec.Containers = []api.Container{}
|
||||
addContainer(pod, "i0", image, request)
|
||||
pod.Spec.InitContainers = pod.Spec.Containers
|
||||
pod.Spec.Containers = []api.Container{}
|
||||
addContainer(pod, "c0", image, request)
|
||||
return pod
|
||||
}
|
||||
|
||||
func getPods() []*api.Pod {
|
||||
return []*api.Pod{
|
||||
createPod("p0", "image:v0", parseReq("", "")),
|
||||
createPod("p1", "image:v1", parseReq("", "300")),
|
||||
createPod("p2", "image:v2", parseReq("300m", "")),
|
||||
createPod("p3", "image:v3", parseReq("300m", "300")),
|
||||
}
|
||||
}
|
||||
|
||||
func verifyContainer(t *testing.T, c *api.Container, cpu, mem int64) {
|
||||
req := c.Resources.Requests
|
||||
if req.Cpu().MilliValue() != cpu {
|
||||
t.Errorf("Wrong CPU request for container %v. Expected %v, got %v.", c.Name, cpu, req.Cpu().MilliValue())
|
||||
}
|
||||
if req.Memory().Value() != mem {
|
||||
t.Errorf("Wrong memory request for container %v. Expected %v, got %v.", c.Name, mem, req.Memory().Value())
|
||||
}
|
||||
}
|
||||
|
||||
func verifyPod(t *testing.T, pod *api.Pod, cpu, mem int64) {
|
||||
verifyContainer(t, &pod.Spec.Containers[0], cpu, mem)
|
||||
verifyContainer(t, &pod.Spec.InitContainers[0], cpu, mem)
|
||||
}
|
||||
|
||||
func verifyAnnotation(t *testing.T, pod *api.Pod, expected string) {
|
||||
a, ok := pod.ObjectMeta.Annotations[initialResourcesAnnotation]
|
||||
if !ok {
|
||||
t.Errorf("No annotation but expected %v", expected)
|
||||
}
|
||||
if a != expected {
|
||||
t.Errorf("Wrong annotation set by Initial Resources: got %v, expected %v", a, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func expectNoAnnotation(t *testing.T, pod *api.Pod) {
|
||||
if a, ok := pod.ObjectMeta.Annotations[initialResourcesAnnotation]; ok {
|
||||
t.Errorf("Expected no annotation but got %v", a)
|
||||
}
|
||||
}
|
||||
|
||||
func admit(t *testing.T, ir admission.MutationInterface, pods []*api.Pod) {
|
||||
for i := range pods {
|
||||
p := pods[i]
|
||||
|
||||
podKind := api.Kind("Pod").WithVersion("version")
|
||||
podRes := api.Resource("pods").WithVersion("version")
|
||||
attrs := admission.NewAttributesRecord(p, nil, podKind, "test", p.ObjectMeta.Name, podRes, "", admission.Create, nil)
|
||||
if err := ir.Admit(attrs); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testAdminScenarios(t *testing.T, ir admission.MutationInterface, p *api.Pod) {
|
||||
podKind := api.Kind("Pod").WithVersion("version")
|
||||
podRes := api.Resource("pods").WithVersion("version")
|
||||
|
||||
var tests = []struct {
|
||||
attrs admission.Attributes
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
admission.NewAttributesRecord(p, nil, podKind, "test", p.ObjectMeta.Name, podRes, "foo", admission.Create, nil),
|
||||
false,
|
||||
},
|
||||
{
|
||||
admission.NewAttributesRecord(&api.ReplicationController{}, nil, podKind, "test", "", podRes, "", admission.Create, nil),
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
err := ir.Admit(test.attrs)
|
||||
if err != nil && test.expectError == false {
|
||||
t.Error(err)
|
||||
} else if err == nil && test.expectError == true {
|
||||
t.Error("Error expected for Admit but received none")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func performTest(t *testing.T, ir admission.MutationInterface) {
|
||||
pods := getPods()
|
||||
admit(t, ir, pods)
|
||||
testAdminScenarios(t, ir, pods[0])
|
||||
|
||||
verifyPod(t, pods[0], 100, 100)
|
||||
verifyPod(t, pods[1], 100, 300)
|
||||
verifyPod(t, pods[2], 300, 100)
|
||||
verifyPod(t, pods[3], 300, 300)
|
||||
|
||||
verifyAnnotation(t, pods[0], "Initial Resources plugin set: cpu, memory request for init container i0; cpu, memory request for container c0")
|
||||
verifyAnnotation(t, pods[1], "Initial Resources plugin set: cpu request for init container i0")
|
||||
verifyAnnotation(t, pods[2], "Initial Resources plugin set: memory request for init container i0")
|
||||
expectNoAnnotation(t, pods[3])
|
||||
}
|
||||
|
||||
func TestEstimateReturnsErrorFromSource(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
return 0, 0, errors.New("Example error")
|
||||
}
|
||||
ir := newInitialResources(&fakeSource{f: f}, 90, false)
|
||||
admit(t, ir, getPods())
|
||||
}
|
||||
|
||||
func TestEstimationBasedOnTheSameImageSameNamespace7d(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
if exactMatch && end.Sub(start) == week && ns == "test-ns" {
|
||||
return 100, 120, nil
|
||||
}
|
||||
return 200, 120, nil
|
||||
}
|
||||
performTest(t, newInitialResources(&fakeSource{f: f}, 90, false))
|
||||
}
|
||||
|
||||
func TestEstimationBasedOnTheSameImageSameNamespace30d(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
if exactMatch && end.Sub(start) == week && ns == "test-ns" {
|
||||
return 200, 20, nil
|
||||
}
|
||||
if exactMatch && end.Sub(start) == month && ns == "test-ns" {
|
||||
return 100, 120, nil
|
||||
}
|
||||
return 200, 120, nil
|
||||
}
|
||||
performTest(t, newInitialResources(&fakeSource{f: f}, 90, false))
|
||||
}
|
||||
|
||||
func TestEstimationBasedOnTheSameImageAllNamespaces7d(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
if exactMatch && ns == "test-ns" {
|
||||
return 200, 20, nil
|
||||
}
|
||||
if exactMatch && end.Sub(start) == week && ns == "" {
|
||||
return 100, 120, nil
|
||||
}
|
||||
return 200, 120, nil
|
||||
}
|
||||
performTest(t, newInitialResources(&fakeSource{f: f}, 90, false))
|
||||
}
|
||||
|
||||
func TestEstimationBasedOnTheSameImageAllNamespaces30d(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
if exactMatch && ns == "test-ns" {
|
||||
return 200, 20, nil
|
||||
}
|
||||
if exactMatch && end.Sub(start) == week && ns == "" {
|
||||
return 200, 20, nil
|
||||
}
|
||||
if exactMatch && end.Sub(start) == month && ns == "" {
|
||||
return 100, 120, nil
|
||||
}
|
||||
return 200, 120, nil
|
||||
}
|
||||
performTest(t, newInitialResources(&fakeSource{f: f}, 90, false))
|
||||
}
|
||||
|
||||
func TestEstimationBasedOnOtherImages(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, image, ns string, exactMatch bool, _, _ time.Time) (int64, int64, error) {
|
||||
if image == "image" && !exactMatch && ns == "" {
|
||||
return 100, 5, nil
|
||||
}
|
||||
return 200, 20, nil
|
||||
}
|
||||
performTest(t, newInitialResources(&fakeSource{f: f}, 90, false))
|
||||
}
|
||||
|
||||
func TestNoData(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, _ bool, _, _ time.Time) (int64, int64, error) {
|
||||
return 200, 0, nil
|
||||
}
|
||||
ir := newInitialResources(&fakeSource{f: f}, 90, false)
|
||||
|
||||
pods := []*api.Pod{
|
||||
createPod("p0", "image:v0", parseReq("", "")),
|
||||
}
|
||||
admit(t, ir, pods)
|
||||
|
||||
if pods[0].Spec.Containers[0].Resources.Requests != nil {
|
||||
t.Errorf("Unexpected resource estimation")
|
||||
}
|
||||
|
||||
expectNoAnnotation(t, pods[0])
|
||||
}
|
||||
|
||||
func TestManyContainers(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, _, _ time.Time) (int64, int64, error) {
|
||||
if exactMatch {
|
||||
return 100, 120, nil
|
||||
}
|
||||
return 200, 30, nil
|
||||
}
|
||||
ir := newInitialResources(&fakeSource{f: f}, 90, false)
|
||||
|
||||
pod := createPod("p", "image:v0", parseReq("", ""))
|
||||
addContainer(pod, "c1", "image:v1", parseReq("", "300"))
|
||||
addContainer(pod, "c2", "image:v2", parseReq("300m", ""))
|
||||
addContainer(pod, "c3", "image:v3", parseReq("300m", "300"))
|
||||
admit(t, ir, []*api.Pod{pod})
|
||||
|
||||
verifyContainer(t, &pod.Spec.Containers[0], 100, 100)
|
||||
verifyContainer(t, &pod.Spec.Containers[1], 100, 300)
|
||||
verifyContainer(t, &pod.Spec.Containers[2], 300, 100)
|
||||
verifyContainer(t, &pod.Spec.Containers[3], 300, 300)
|
||||
|
||||
verifyAnnotation(t, pod, "Initial Resources plugin set: cpu, memory request for init container i0; cpu, memory request for container c0; cpu request for container c1; memory request for container c2")
|
||||
}
|
||||
|
||||
func TestNamespaceAware(t *testing.T) {
|
||||
f := func(_ api.ResourceName, _ int64, _, ns string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
if ns == "test-ns" {
|
||||
return 200, 0, nil
|
||||
}
|
||||
return 200, 120, nil
|
||||
}
|
||||
ir := newInitialResources(&fakeSource{f: f}, 90, true)
|
||||
|
||||
pods := []*api.Pod{
|
||||
createPod("p0", "image:v0", parseReq("", "")),
|
||||
}
|
||||
admit(t, ir, pods)
|
||||
|
||||
if pods[0].Spec.Containers[0].Resources.Requests != nil {
|
||||
t.Errorf("Unexpected resource estimation")
|
||||
}
|
||||
|
||||
expectNoAnnotation(t, pods[0])
|
||||
}
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/data_source.go (generated, vendored): 56 changes
@@ -1,56 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initialresources
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
influxdbHost = flag.String("ir-influxdb-host", "localhost:8080/api/v1/namespaces/kube-system/services/monitoring-influxdb:api/proxy", "Address of InfluxDB which contains metrics required by InitialResources")
|
||||
user = flag.String("ir-user", "root", "User used for connecting to InfluxDB")
|
||||
// TODO: figure out how to better pass password here
|
||||
password = flag.String("ir-password", "root", "Password used for connecting to InfluxDB")
|
||||
db = flag.String("ir-dbname", "k8s", "InfluxDB database name which contains metrics required by InitialResources")
|
||||
hawkularConfig = flag.String("ir-hawkular", "", "Hawkular configuration URL")
|
||||
)
|
||||
|
||||
// WARNING: If you are planning to add another implementation of dataSource interface please bear in mind,
|
||||
// that dataSource will be moved to Heapster some time in the future and possibly rewritten.
|
||||
type dataSource interface {
|
||||
// Returns <perc>th of sample values which represent usage of <kind> for containers running <image>,
|
||||
// within time range (start, end), number of samples considered and error if occurred.
|
||||
// If <exactMatch> then take only samples that concern the same image (both name and take are the same),
|
||||
// otherwise consider also samples with the same image a possibly different tag.
|
||||
GetUsagePercentile(kind api.ResourceName, perc int64, image, namespace string, exactMatch bool, start, end time.Time) (usage int64, samples int64, err error)
|
||||
}
|
||||
|
||||
func newDataSource(kind string) (dataSource, error) {
|
||||
if kind == "influxdb" {
|
||||
return newInfluxdbSource(*influxdbHost, *user, *password, *db)
|
||||
}
|
||||
if kind == "gcm" {
|
||||
return newGcmSource()
|
||||
}
|
||||
if kind == "hawkular" {
|
||||
return newHawkularSource(*hawkularConfig)
|
||||
}
|
||||
return nil, fmt.Errorf("unknown data source %v", kind)
|
||||
}
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/data_source_test.go (generated, vendored): 45 changes
@@ -1,45 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initialresources
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestInfluxDBDataSource(t *testing.T) {
|
||||
ds, _ := newDataSource("influxdb")
|
||||
if _, ok := ds.(*influxdbSource); !ok {
|
||||
t.Errorf("newDataSource did not return valid InfluxDB type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGCMDataSource(t *testing.T) {
|
||||
// No ProjectID set
|
||||
newDataSource("gcm")
|
||||
}
|
||||
|
||||
func TestHawkularDataSource(t *testing.T) {
|
||||
ds, _ := newDataSource("hawkular")
|
||||
if _, ok := ds.(*hawkularSource); !ok {
|
||||
t.Errorf("newDataSource did not return valid hawkularSource type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoDataSourceFound(t *testing.T) {
|
||||
ds, err := newDataSource("")
|
||||
if ds != nil || err == nil {
|
||||
t.Errorf("newDataSource found for empty input")
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go (generated, vendored): 132 changes
@@ -1,132 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initialresources
|
||||
|
||||
import (
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"math"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
gce "cloud.google.com/go/compute/metadata"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
gcm "google.golang.org/api/cloudmonitoring/v2beta2"
|
||||
)
|
||||
|
||||
const (
|
||||
kubePrefix = "custom.cloudmonitoring.googleapis.com/kubernetes.io/"
|
||||
cpuMetricName = kubePrefix + "cpu/usage_rate"
|
||||
memMetricName = kubePrefix + "memory/usage"
|
||||
labelImage = kubePrefix + "label/container_base_image"
|
||||
labelNs = kubePrefix + "label/pod_namespace"
|
||||
)
|
||||
|
||||
type gcmSource struct {
|
||||
project string
|
||||
gcmService *gcm.Service
|
||||
}
|
||||
|
||||
func newGcmSource() (dataSource, error) {
|
||||
// Detect project ID
|
||||
projectId, err := gce.ProjectID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create Google Cloud Monitoring service.
|
||||
client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
|
||||
s, err := gcm.New(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &gcmSource{
|
||||
project: projectId,
|
||||
gcmService: s,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *gcmSource) query(metric, oldest, youngest string, labels []string, pageToken string) (*gcm.ListTimeseriesResponse, error) {
|
||||
req := s.gcmService.Timeseries.List(s.project, metric, youngest, nil).
|
||||
Oldest(oldest).
|
||||
Aggregator("mean").
|
||||
Window("1m")
|
||||
for _, l := range labels {
|
||||
req = req.Labels(l)
|
||||
}
|
||||
if pageToken != "" {
|
||||
req = req.PageToken(pageToken)
|
||||
}
|
||||
return req.Do()
|
||||
}
|
||||
|
||||
func retrieveRawSamples(res *gcm.ListTimeseriesResponse, output *[]int) {
|
||||
for _, ts := range res.Timeseries {
|
||||
for _, p := range ts.Points {
|
||||
*output = append(*output, int(*p.DoubleValue))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *gcmSource) GetUsagePercentile(kind api.ResourceName, perc int64, image, namespace string, exactMatch bool, start, end time.Time) (int64, int64, error) {
|
||||
var metric string
|
||||
if kind == api.ResourceCPU {
|
||||
metric = cpuMetricName
|
||||
} else if kind == api.ResourceMemory {
|
||||
metric = memMetricName
|
||||
}
|
||||
|
||||
var labels []string
|
||||
if exactMatch {
|
||||
labels = append(labels, labelImage+"=="+image)
|
||||
} else {
|
||||
labels = append(labels, labelImage+"=~"+image+".*")
|
||||
}
|
||||
if namespace != "" {
|
||||
labels = append(labels, labelNs+"=="+namespace)
|
||||
}
|
||||
|
||||
oldest := start.Format(time.RFC3339)
|
||||
youngest := end.Format(time.RFC3339)
|
||||
|
||||
rawSamples := make([]int, 0)
|
||||
pageToken := ""
|
||||
for {
|
||||
res, err := s.query(metric, oldest, youngest, labels, pageToken)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
|
||||
retrieveRawSamples(res, &rawSamples)
|
||||
|
||||
pageToken = res.NextPageToken
|
||||
if pageToken == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
count := len(rawSamples)
|
||||
if count == 0 {
|
||||
return 0, 0, nil
|
||||
}
|
||||
sort.Ints(rawSamples)
|
||||
usageIndex := int64(math.Ceil(float64(count)*9/10)) - 1
|
||||
usage := rawSamples[usageIndex]
|
||||
|
||||
return int64(usage), int64(count), nil
|
||||
}
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm_test.go (generated, vendored): 46 changes
@@ -1,46 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package initialresources
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
gcm "google.golang.org/api/cloudmonitoring/v2beta2"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
)
|
||||
|
||||
func TestGCMReturnsErrorIfClientCannotConnect(t *testing.T) {
|
||||
client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
|
||||
service, _ := gcm.New(client)
|
||||
source := &gcmSource{
|
||||
project: "",
|
||||
gcmService: service,
|
||||
}
|
||||
|
||||
_, _, err := source.GetUsagePercentile(api.ResourceCPU, 90, "", "", true, time.Now(), time.Now())
|
||||
if err == nil {
|
||||
t.Errorf("Expected error from GCM")
|
||||
}
|
||||
|
||||
_, _, err = source.GetUsagePercentile(api.ResourceMemory, 90, "", "foo", false, time.Now(), time.Now())
|
||||
if err == nil {
|
||||
t.Errorf("Expected error from GCM")
|
||||
}
|
||||
}
|
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular.go (generated, vendored): 223 changes
@@ -1,223 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package initialresources

import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"

"github.com/golang/glog"
"github.com/hawkular/hawkular-client-go/metrics"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"

restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)

type hawkularSource struct {
client *metrics.Client
uri *url.URL
useNamespace bool
modifiers []metrics.Modifier
}

const (
containerImageTag string = "container_base_image"
descriptorTag string = "descriptor_name"
separator string = "/"

defaultServiceAccountFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
)

// heapsterName gets the equivalent MetricDescriptor.Name used in the Heapster
func heapsterName(kind api.ResourceName) string {
switch kind {
case api.ResourceCPU:
return "cpu/usage"
case api.ResourceMemory:
return "memory/usage"
default:
return ""
}
}

// tagQuery creates tagFilter query for Hawkular
func tagQuery(kind api.ResourceName, image string, exactMatch bool) map[string]string {
q := make(map[string]string)

// Add here the descriptor_tag..
q[descriptorTag] = heapsterName(kind)

if exactMatch {
q[containerImageTag] = image
} else {
split := strings.Index(image, "@")
if split < 0 {
split = strings.Index(image, ":")
}
q[containerImageTag] = fmt.Sprintf("%s:*", image[:split])
}

return q
}

// dataSource API

func (hs *hawkularSource) GetUsagePercentile(kind api.ResourceName, perc int64, image, namespace string, exactMatch bool, start, end time.Time) (int64, int64, error) {
q := tagQuery(kind, image, exactMatch)

m := make([]metrics.Modifier, len(hs.modifiers), 2+len(hs.modifiers))
copy(m, hs.modifiers)

if namespace != metav1.NamespaceAll {
m = append(m, metrics.Tenant(namespace))
}

p := float64(perc)
m = append(m, metrics.Filters(metrics.TagsFilter(q), metrics.BucketsFilter(1), metrics.StartTimeFilter(start), metrics.EndTimeFilter(end), metrics.PercentilesFilter([]float64{p})))

bp, err := hs.client.ReadBuckets(metrics.Counter, m...)
if err != nil {
return 0, 0, err
}

if len(bp) > 0 && len(bp[0].Percentiles) > 0 {
return int64(bp[0].Percentiles[0].Value), int64(bp[0].Samples), nil
}
return 0, 0, nil
}

// newHawkularSource creates a new Hawkular Source. The uri follows the scheme from Heapster
func newHawkularSource(uri string) (dataSource, error) {
u, err := url.Parse(uri)
if err != nil {
return nil, err
}

d := &hawkularSource{
uri: u,
}
if err = d.init(); err != nil {
return nil, err
}
return d, nil
}

// init initializes the Hawkular dataSource. Almost equal to the Heapster initialization
func (hs *hawkularSource) init() error {
hs.modifiers = make([]metrics.Modifier, 0)
p := metrics.Parameters{
Tenant: "heapster", // This data is stored by the heapster - for no-namespace hits
Url: hs.uri.String(),
}

opts := hs.uri.Query()

if v, found := opts["tenant"]; found {
p.Tenant = v[0]
}

if v, found := opts["useServiceAccount"]; found {
if b, _ := strconv.ParseBool(v[0]); b {
accountFile := defaultServiceAccountFile
if file, f := opts["serviceAccountFile"]; f {
accountFile = file[0]
}

// If a readable service account token exists, then use it
if contents, err := ioutil.ReadFile(accountFile); err == nil {
p.Token = string(contents)
} else {
glog.Errorf("Could not read contents of %s, no token authentication is used\n", defaultServiceAccountFile)
}
}
}

// Authentication / Authorization parameters
tC := &tls.Config{}

if v, found := opts["auth"]; found {
if _, f := opts["caCert"]; f {
return fmt.Errorf("both auth and caCert files provided, combination is not supported")
}
if len(v[0]) > 0 {
// Authfile
kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{
ExplicitPath: v[0]},
&clientcmd.ConfigOverrides{}).ClientConfig()
if err != nil {
return err
}
tC, err = restclient.TLSConfigFor(kubeConfig)
if err != nil {
return err
}
}
}

if u, found := opts["user"]; found {
if _, wrong := opts["useServiceAccount"]; wrong {
return fmt.Errorf("if user and password are used, serviceAccount cannot be used")
}
if p, f := opts["pass"]; f {
hs.modifiers = append(hs.modifiers, func(req *http.Request) error {
req.SetBasicAuth(u[0], p[0])
return nil
})
}
}

if v, found := opts["caCert"]; found {
caCert, err := ioutil.ReadFile(v[0])
if err != nil {
return err
}

caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)

tC.RootCAs = caCertPool
}

if v, found := opts["insecure"]; found {
insecure, err := strconv.ParseBool(v[0])
if err != nil {
return err
}
tC.InsecureSkipVerify = insecure
}

p.TLSConfig = tC

c, err := metrics.NewHawkularClient(p)
if err != nil {
return err
}

hs.client = c

glog.Infof("Initialised Hawkular Source with parameters %v", p)
return nil
}
142
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular_test.go
generated
vendored
142
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular_test.go
generated
vendored
@ -1,142 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package initialresources

import (
"fmt"
api "k8s.io/kubernetes/pkg/apis/core"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"

assert "github.com/stretchr/testify/require"
)

const (
testImageName string = "hawkular/hawkular-metrics"
testImageVersion string = "latest"
testImageSHA string = "b727ece3780cdd30e9a86226e520f26bcc396071ed7a86b7ef6684bb93a9f717"
testPartialMatch string = "hawkular/hawkular-metrics:*"
)

func testImageWithVersion() string {
return fmt.Sprintf("%s:%s", testImageName, testImageVersion)
}

func testImageWithReference() string {
return fmt.Sprintf("%s@sha256:%s", testImageName, testImageSHA)
}

func TestTaqQuery(t *testing.T) {
kind := api.ResourceCPU
tQ := tagQuery(kind, testImageWithVersion(), false)

assert.Equal(t, 2, len(tQ))
assert.Equal(t, testPartialMatch, tQ[containerImageTag])
assert.Equal(t, "cpu/usage", tQ[descriptorTag])

tQe := tagQuery(kind, testImageWithVersion(), true)
assert.Equal(t, 2, len(tQe))
assert.Equal(t, testImageWithVersion(), tQe[containerImageTag])
assert.Equal(t, "cpu/usage", tQe[descriptorTag])

tQr := tagQuery(kind, testImageWithReference(), false)
assert.Equal(t, 2, len(tQe))
assert.Equal(t, testPartialMatch, tQr[containerImageTag])
assert.Equal(t, "cpu/usage", tQr[descriptorTag])

tQre := tagQuery(kind, testImageWithReference(), true)
assert.Equal(t, 2, len(tQe))
assert.Equal(t, testImageWithReference(), tQre[containerImageTag])
assert.Equal(t, "cpu/usage", tQre[descriptorTag])

kind = api.ResourceMemory
tQ = tagQuery(kind, testImageWithReference(), true)
assert.Equal(t, "memory/usage", tQ[descriptorTag])

kind = api.ResourceStorage
tQ = tagQuery(kind, testImageWithReference(), true)
assert.Equal(t, "", tQ[descriptorTag])
}

func newSource(t *testing.T) (map[string]string, dataSource) {
tenant := "16a8884e4c155457ee38a8901df6b536"
reqs := make(map[string]string)

s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, tenant, r.Header.Get("Hawkular-Tenant"))
assert.Equal(t, "Basic", r.Header.Get("Authorization")[:5])

if strings.Contains(r.RequestURI, "counters/data") {
assert.True(t, strings.Contains(r.RequestURI, url.QueryEscape(testImageWithVersion())))
assert.True(t, strings.Contains(r.RequestURI, "cpu%2Fusage"))
assert.True(t, strings.Contains(r.RequestURI, "percentiles=90"))

reqs["counters/data"] = r.RequestURI
fmt.Fprintf(w, ` [{"start":1444620095882,"end":1444648895882,"min":1.45,"avg":1.45,"median":1.45,"max":1.45,"percentile95th":1.45,"samples":123456,"percentiles":[{"value":7896.54,"quantile":0.9},{"value":1.45,"quantile":0.99}],"empty":false}]`)
} else {
reqs["unknown"] = r.RequestURI
}
}))

paramUri := fmt.Sprintf("%s?user=test&pass=yep&tenant=foo&insecure=true", s.URL)

hSource, err := newHawkularSource(paramUri)
assert.NoError(t, err)

return reqs, hSource
}

func TestInsecureMustBeBool(t *testing.T) {
paramUri := fmt.Sprintf("localhost?user=test&pass=yep&insecure=foo")
_, err := newHawkularSource(paramUri)
if err == nil {
t.Errorf("Expected error from newHawkularSource")
}
}

func TestCAFileMustExist(t *testing.T) {
paramUri := fmt.Sprintf("localhost?user=test&pass=yep&caCert=foo")
_, err := newHawkularSource(paramUri)
if err == nil {
t.Errorf("Expected error from newHawkularSource")
}
}

func TestServiceAccountIsMutuallyExclusiveWithAuth(t *testing.T) {
paramUri := fmt.Sprintf("localhost?user=test&pass=yep&useServiceAccount=true")
_, err := newHawkularSource(paramUri)
if err == nil {
t.Errorf("Expected error from newHawkularSource")
}
}

func TestGetUsagePercentile(t *testing.T) {
reqs, hSource := newSource(t)

usage, samples, err := hSource.GetUsagePercentile(api.ResourceCPU, 90, testImageWithVersion(), "16a8884e4c155457ee38a8901df6b536", true, time.Now(), time.Now())
assert.NoError(t, err)

assert.Equal(t, 1, len(reqs))
assert.Equal(t, "", reqs["unknown"])

assert.Equal(t, int64(123456), int64(samples))
assert.Equal(t, int64(7896), usage) // float64 -> int64
}
73
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb.go
generated
vendored
73
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb.go
generated
vendored
@ -1,73 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package initialresources

import (
"fmt"
"strings"
"time"

influxdb "github.com/influxdata/influxdb/client"
api "k8s.io/kubernetes/pkg/apis/core"
)

const (
cpuSeriesName = "autoscaling.cpu.usage.2m"
memSeriesName = "autoscaling.memory.usage.2m"
cpuContinuousQuery = "select derivative(value) as value from \"cpu/usage_ns_cumulative\" where pod_id <> '' group by pod_id, pod_namespace, container_name, container_base_image, time(2m) into " + cpuSeriesName
memContinuousQuery = "select mean(value) as value from \"memory/usage_bytes_gauge\" where pod_id <> '' group by pod_id, pod_namespace, container_name, container_base_image, time(2m) into " + memSeriesName
timeFormat = "2006-01-02 15:04:05"
)

// TODO(piosz): rewrite this once we will migrate into InfluxDB v0.9.
type influxdbSource struct{}

func newInfluxdbSource(host, user, password, db string) (dataSource, error) {
return &influxdbSource{}, nil
}

func (s *influxdbSource) query(query string) ([]*influxdb.Response, error) {
// TODO(piosz): add support again
return nil, fmt.Errorf("temporary not supported; see #18826 for more details")
}

func (s *influxdbSource) GetUsagePercentile(kind api.ResourceName, perc int64, image, namespace string, exactMatch bool, start, end time.Time) (int64, int64, error) {
var series string
if kind == api.ResourceCPU {
series = cpuSeriesName
} else if kind == api.ResourceMemory {
series = memSeriesName
}

var imgPattern string
if exactMatch {
imgPattern = "='" + image + "'"
} else {
// Escape character "/" in image pattern.
imgPattern = "=~/^" + strings.Replace(image, "/", "\\/", -1) + "/"
}
var namespaceCond string
if namespace != "" {
namespaceCond = " and pod_namespace='" + namespace + "'"
}

query := fmt.Sprintf("select percentile(value, %v), count(pod_id) from %v where container_base_image%v%v and time > '%v' and time < '%v'", perc, series, imgPattern, namespaceCond, start.UTC().Format(timeFormat), end.UTC().Format(timeFormat))
if _, err := s.query(query); err != nil {
return 0, 0, fmt.Errorf("error while trying to query InfluxDB: %v", err)
}
return 0, 0, nil
}
40
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb_test.go
generated
vendored
40
vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/influxdb_test.go
generated
vendored
@ -1,40 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package initialresources

import (
"testing"
"time"

api "k8s.io/kubernetes/pkg/apis/core"
)

func TestInfluxDBGetUsagePercentileCPU(t *testing.T) {
source, _ := newInfluxdbSource("", "", "", "")
_, _, err := source.GetUsagePercentile(api.ResourceCPU, 90, "", "", true, time.Now(), time.Now())
if err == nil {
t.Errorf("Expected error because InfluxDB is temporarily disabled")
}
}

func TestInfluxDBGetUsagePercentileMemory(t *testing.T) {
source, _ := newInfluxdbSource("", "", "", "")
_, _, err := source.GetUsagePercentile(api.ResourceMemory, 90, "", "foo", false, time.Now(), time.Now())
if err == nil {
t.Errorf("Expected error because InfluxDB is temporarily disabled")
}
}
9
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD
generated
vendored
9
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/BUILD
generated
vendored
@ -16,13 +16,12 @@ go_library(
"//pkg/apis/core:go_default_library",
"//pkg/apis/policy:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/client/listers/core/internalversion:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
@ -38,14 +37,14 @@ go_test(
"//pkg/apis/core:go_default_library",
"//pkg/apis/policy:go_default_library",
"//pkg/auth/nodeidentifier:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
"//pkg/client/listers/core/internalversion:go_default_library",
"//pkg/features:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
],
)
2
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/OWNERS
generated
vendored
@ -2,7 +2,9 @@ approvers:
- deads2k
- liggitt
- tallclair
- mikedanese
reviewers:
- deads2k
- liggitt
- tallclair
- mikedanese
41
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go
generated
vendored
41
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission.go
generated
vendored
@ -22,7 +22,6 @@ import (

apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apiserver/pkg/admission"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -31,8 +30,8 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
internalversion "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
"k8s.io/kubernetes/pkg/features"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
)
@ -62,18 +61,18 @@ func NewPlugin(nodeIdentifier nodeidentifier.NodeIdentifier) *nodePlugin {
type nodePlugin struct {
*admission.Handler
nodeIdentifier nodeidentifier.NodeIdentifier
podsGetter coreinternalversion.PodsGetter
podsGetter internalversion.PodLister
// allows overriding for testing
features utilfeature.FeatureGate
}

var (
_ = admission.Interface(&nodePlugin{})
_ = kubeapiserveradmission.WantsInternalKubeClientSet(&nodePlugin{})
_ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&nodePlugin{})
)

func (p *nodePlugin) SetInternalKubeClientSet(f internalclientset.Interface) {
p.podsGetter = f.Core()
func (p *nodePlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) {
p.podsGetter = f.Core().InternalVersion().Pods().Lister()
}

@ -183,14 +182,10 @@ func (c *nodePlugin) admitPod(nodeName string, a admission.Attributes) error {
return nil

case admission.Delete:
// get the existing pod from the server cache
existingPod, err := c.podsGetter.Pods(a.GetNamespace()).Get(a.GetName(), v1.GetOptions{ResourceVersion: "0"})
// get the existing pod
existingPod, err := c.podsGetter.Pods(a.GetNamespace()).Get(a.GetName())
if errors.IsNotFound(err) {
// wasn't found in the server cache, do a live lookup before forbidding
existingPod, err = c.podsGetter.Pods(a.GetNamespace()).Get(a.GetName(), v1.GetOptions{})
if errors.IsNotFound(err) {
return err
}
return err
}
if err != nil {
return admission.NewForbidden(a, err)
@ -241,14 +236,10 @@ func (c *nodePlugin) admitPodEviction(nodeName string, a admission.Attributes) e
}
podName = eviction.Name
}
// get the existing pod from the server cache
existingPod, err := c.podsGetter.Pods(a.GetNamespace()).Get(podName, v1.GetOptions{ResourceVersion: "0"})
// get the existing pod
existingPod, err := c.podsGetter.Pods(a.GetNamespace()).Get(podName)
if errors.IsNotFound(err) {
// wasn't found in the server cache, do a live lookup before forbidding
existingPod, err = c.podsGetter.Pods(a.GetNamespace()).Get(podName, v1.GetOptions{})
if errors.IsNotFound(err) {
return err
}
return err
}
if err != nil {
return admission.NewForbidden(a, err)
@ -347,6 +338,12 @@ func (c *nodePlugin) admitNode(nodeName string, a admission.Attributes) error {
if node.Spec.ConfigSource != nil && !apiequality.Semantic.DeepEqual(node.Spec.ConfigSource, oldNode.Spec.ConfigSource) {
return admission.NewForbidden(a, fmt.Errorf("cannot update configSource to a new non-nil configSource"))
}

// Don't allow a node to update its own taints. This would allow a node to remove or modify its
// taints in a way that would let it steer disallowed workloads to itself.
if !apiequality.Semantic.DeepEqual(node.Spec.Taints, oldNode.Spec.Taints) {
return admission.NewForbidden(a, fmt.Errorf("cannot modify taints"))
}
}

return nil
@ -376,7 +373,7 @@ func (c *nodePlugin) admitServiceAccount(nodeName string, a admission.Attributes
if ref.UID == "" {
return admission.NewForbidden(a, fmt.Errorf("node requested token with a pod binding without a uid"))
}
pod, err := c.podsGetter.Pods(a.GetNamespace()).Get(ref.Name, v1.GetOptions{})
pod, err := c.podsGetter.Pods(a.GetNamespace()).Get(ref.Name)
if errors.IsNotFound(err) {
return err
}
71
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission_test.go
generated
vendored
71
vendor/k8s.io/kubernetes/plugin/pkg/admission/noderestriction/admission_test.go
generated
vendored
@ -25,12 +25,12 @@ import (
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
authenticationapi "k8s.io/kubernetes/pkg/apis/authentication"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
coreinternalversion "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/client/listers/core/internalversion"
"k8s.io/kubernetes/pkg/features"
)

@ -63,6 +63,7 @@ func makeTestPod(namespace, name, node string, mirror bool) *api.Pod {
func makeTestPodEviction(name string) *policy.Eviction {
eviction := &policy.Eviction{}
eviction.Name = name
eviction.Namespace = "ns"
return eviction
}

@ -91,10 +92,22 @@ func Test_nodePlugin_Admit(t *testing.T) {
mynodeObjMeta = metav1.ObjectMeta{Name: "mynode"}
mynodeObj = &api.Node{ObjectMeta: mynodeObjMeta}
mynodeObjConfigA = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{ConfigSource: &api.NodeConfigSource{
ConfigMapRef: &api.ObjectReference{Name: "foo", Namespace: "bar", UID: "fooUID"}}}}
ConfigMap: &api.ConfigMapNodeConfigSource{
Name: "foo",
Namespace: "bar",
UID: "fooUID",
KubeletConfigKey: "kubelet",
}}}}
mynodeObjConfigB = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{ConfigSource: &api.NodeConfigSource{
ConfigMapRef: &api.ObjectReference{Name: "qux", Namespace: "bar", UID: "quxUID"}}}}
othernodeObj = &api.Node{ObjectMeta: metav1.ObjectMeta{Name: "othernode"}}
ConfigMap: &api.ConfigMapNodeConfigSource{
Name: "qux",
Namespace: "bar",
UID: "quxUID",
KubeletConfigKey: "kubelet",
}}}}
mynodeObjTaintA = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{Taints: []api.Taint{{Key: "mykey", Value: "A"}}}}
mynodeObjTaintB = &api.Node{ObjectMeta: mynodeObjMeta, Spec: api.NodeSpec{Taints: []api.Taint{{Key: "mykey", Value: "B"}}}}
othernodeObj = &api.Node{ObjectMeta: metav1.ObjectMeta{Name: "othernode"}}

mymirrorpod = makeTestPod("ns", "mymirrorpod", "mynode", true)
othermirrorpod = makeTestPod("ns", "othermirrorpod", "othernode", true)
@ -125,10 +138,20 @@ func Test_nodePlugin_Admit(t *testing.T) {
svcacctResource = api.Resource("serviceaccounts").WithVersion("v1")
tokenrequestKind = api.Kind("TokenRequest").WithVersion("v1")

noExistingPods = fake.NewSimpleClientset().Core()
existingPods = fake.NewSimpleClientset(mymirrorpod, othermirrorpod, unboundmirrorpod, mypod, otherpod, unboundpod).Core()
noExistingPodsIndex = cache.NewIndexer(cache.MetaNamespaceKeyFunc, nil)
noExistingPods = internalversion.NewPodLister(noExistingPodsIndex)

existingPodsIndex = cache.NewIndexer(cache.MetaNamespaceKeyFunc, nil)
existingPods = internalversion.NewPodLister(existingPodsIndex)
)

existingPodsIndex.Add(mymirrorpod)
existingPodsIndex.Add(othermirrorpod)
existingPodsIndex.Add(unboundmirrorpod)
existingPodsIndex.Add(mypod)
existingPodsIndex.Add(otherpod)
existingPodsIndex.Add(unboundpod)

sapod := makeTestPod("ns", "mysapod", "mynode", true)
sapod.Spec.ServiceAccountName = "foo"

@ -143,7 +166,7 @@ func Test_nodePlugin_Admit(t *testing.T) {

tests := []struct {
name string
podsGetter coreinternalversion.PodsGetter
podsGetter internalversion.PodLister
attributes admission.Attributes
features utilfeature.FeatureGate
err string
@ -446,7 +469,7 @@ func Test_nodePlugin_Admit(t *testing.T) {
err: "forbidden: unexpected operation",
},
{
name: "forbid create of eviction for normal pod bound to another",
name: "forbid create of unnamed eviction for normal pod bound to another",
podsGetter: existingPods,
attributes: admission.NewAttributesRecord(unnamedEviction, nil, evictionKind, otherpod.Namespace, otherpod.Name, podResource, "eviction", admission.Create, mynode),
err: "spec.nodeName set to itself",
@ -612,6 +635,12 @@ func Test_nodePlugin_Admit(t *testing.T) {
attributes: admission.NewAttributesRecord(mynodeObj, nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, mynode),
err: "",
},
{
name: "allow create of my node with taints",
podsGetter: noExistingPods,
attributes: admission.NewAttributesRecord(mynodeObjTaintA, nil, nodeKind, mynodeObj.Namespace, "", nodeResource, "", admission.Create, mynode),
err: "",
},
{
name: "allow update of my node",
podsGetter: existingPods,
@ -660,6 +689,30 @@ func Test_nodePlugin_Admit(t *testing.T) {
attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjConfigA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode),
err: "",
},
{
name: "allow update of my node: no change to taints",
podsGetter: existingPods,
attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode),
err: "",
},
{
name: "forbid update of my node: add taints",
podsGetter: existingPods,
attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObj, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode),
err: "cannot modify taints",
},
{
name: "forbid update of my node: remove taints",
podsGetter: existingPods,
attributes: admission.NewAttributesRecord(mynodeObj, mynodeObjTaintA, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode),
err: "cannot modify taints",
},
{
name: "forbid update of my node: change taints",
podsGetter: existingPods,
attributes: admission.NewAttributesRecord(mynodeObjTaintA, mynodeObjTaintB, nodeKind, mynodeObj.Namespace, mynodeObj.Name, nodeResource, "", admission.Update, mynode),
err: "cannot modify taints",
},

// Other node object
{
2
vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/BUILD
generated
vendored
@ -51,8 +51,6 @@ go_library(
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
114
vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/admission_test.go
generated
vendored
114
vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/admission_test.go
generated
vendored
@ -37,21 +37,6 @@ import (

// TestPodAdmission verifies various scenarios involving pod/namespace tolerations
func TestPodAdmission(t *testing.T) {
namespace := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "testNamespace",
Namespace: "",
},
}

mockClient := &fake.Clientset{}
handler, informerFactory, err := newHandlerForTest(mockClient)
if err != nil {
t.Errorf("unexpected error initializing handler: %v", err)
}
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)

CPU1000m := resource.MustParse("1000m")
CPU500m := resource.MustParse("500m")
@ -230,57 +215,74 @@ func TestPodAdmission(t *testing.T) {
},
}
for _, test := range tests {
if test.namespaceTolerations != nil {
tolerationStr, err := json.Marshal(test.namespaceTolerations)
if err != nil {
t.Errorf("error in marshalling namespace tolerations %v", test.namespaceTolerations)
t.Run(test.testName, func(t *testing.T) {
namespace := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "testNamespace",
Namespace: "",
Annotations: map[string]string{},
},
}
namespace.Annotations = map[string]string{NSDefaultTolerations: string(tolerationStr)}
}

if test.whitelist != nil {
tolerationStr, err := json.Marshal(test.whitelist)
if err != nil {
t.Errorf("error in marshalling namespace whitelist %v", test.whitelist)
if test.namespaceTolerations != nil {
tolerationStr, err := json.Marshal(test.namespaceTolerations)
if err != nil {
t.Errorf("error in marshalling namespace tolerations %v", test.namespaceTolerations)
}
namespace.Annotations = map[string]string{NSDefaultTolerations: string(tolerationStr)}
}
namespace.Annotations[NSWLTolerations] = string(tolerationStr)
}

informerFactory.Core().InternalVersion().Namespaces().Informer().GetStore().Update(namespace)
if test.whitelist != nil {
tolerationStr, err := json.Marshal(test.whitelist)
if err != nil {
t.Errorf("error in marshalling namespace whitelist %v", test.whitelist)
}
namespace.Annotations[NSWLTolerations] = string(tolerationStr)
}

handler.pluginConfig = &pluginapi.Configuration{Default: test.defaultClusterTolerations, Whitelist: test.clusterWhitelist}
pod := test.pod
pod.Spec.Tolerations = test.podTolerations
mockClient := fake.NewSimpleClientset(namespace)
handler, informerFactory, err := newHandlerForTest(mockClient)
if err != nil {
t.Fatalf("unexpected error initializing handler: %v", err)
}
stopCh := make(chan struct{})
defer close(stopCh)
informerFactory.Start(stopCh)

// copy the original pod for tests of uninitialized pod updates.
oldPod := *pod
oldPod.Initializers = &metav1.Initializers{Pending: []metav1.Initializer{{Name: "init"}}}
oldPod.Spec.Tolerations = []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}}
handler.pluginConfig = &pluginapi.Configuration{Default: test.defaultClusterTolerations, Whitelist: test.clusterWhitelist}
pod := test.pod
pod.Spec.Tolerations = test.podTolerations

err := handler.Admit(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
if test.admit && err != nil {
t.Errorf("Test: %s, expected no error but got: %s", test.testName, err)
} else if !test.admit && err == nil {
t.Errorf("Test: %s, expected an error", test.testName)
}
// copy the original pod for tests of uninitialized pod updates.
oldPod := *pod
oldPod.Initializers = &metav1.Initializers{Pending: []metav1.Initializer{{Name: "init"}}}
oldPod.Spec.Tolerations = []api.Toleration{{Key: "testKey", Operator: "Equal", Value: "testValue1", Effect: "NoSchedule", TolerationSeconds: nil}}

updatedPodTolerations := pod.Spec.Tolerations
if test.admit && !tolerations.EqualTolerations(updatedPodTolerations, test.mergedTolerations) {
t.Errorf("Test: %s, expected: %#v but got: %#v", test.testName, test.mergedTolerations, updatedPodTolerations)
}
err = handler.Admit(admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
if test.admit && err != nil {
t.Errorf("Test: %s, expected no error but got: %s", test.testName, err)
} else if !test.admit && err == nil {
t.Errorf("Test: %s, expected an error", test.testName)
}

// handles update of uninitialized pod like it's newly created.
err = handler.Admit(admission.NewAttributesRecord(pod, &oldPod, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
if test.admit && err != nil {
t.Errorf("Test: %s, expected no error but got: %s", test.testName, err)
} else if !test.admit && err == nil {
t.Errorf("Test: %s, expected an error", test.testName)
}
updatedPodTolerations := pod.Spec.Tolerations
if test.admit && !tolerations.EqualTolerations(updatedPodTolerations, test.mergedTolerations) {
t.Errorf("Test: %s, expected: %#v but got: %#v", test.testName, test.mergedTolerations, updatedPodTolerations)
}

updatedPodTolerations = pod.Spec.Tolerations
if test.admit && !tolerations.EqualTolerations(updatedPodTolerations, test.mergedTolerations) {
t.Errorf("Test: %s, expected: %#v but got: %#v", test.testName, test.mergedTolerations, updatedPodTolerations)
}
// handles update of uninitialized pod like it's newly created.
err = handler.Admit(admission.NewAttributesRecord(pod, &oldPod, api.Kind("Pod").WithVersion("version"), "testNamespace", namespace.ObjectMeta.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
if test.admit && err != nil {
t.Errorf("Test: %s, expected no error but got: %s", test.testName, err)
} else if !test.admit && err == nil {
t.Errorf("Test: %s, expected an error", test.testName)
}

updatedPodTolerations = pod.Spec.Tolerations
if test.admit && !tolerations.EqualTolerations(updatedPodTolerations, test.mergedTolerations) {
t.Errorf("Test: %s, expected: %#v but got: %#v", test.testName, test.mergedTolerations, updatedPodTolerations)
}
})
}
}
@ -12,9 +12,8 @@ go_library(
deps = [
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
],
)
@ -19,25 +19,15 @@ limitations under the License.
package install

import (
"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
internalapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
versionedapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1"
)

// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
if err := announced.NewGroupMetaFactory(
&announced.GroupMetaFactoryArgs{
GroupName: internalapi.GroupName,
VersionPreferenceOrder: []string{versionedapi.SchemeGroupVersion.Version},
AddInternalObjectsToScheme: internalapi.AddToScheme,
},
announced.VersionToSchemeFunc{
versionedapi.SchemeGroupVersion.Version: versionedapi.AddToScheme,
},
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
panic(err)
}
func Install(scheme *runtime.Scheme) {
utilruntime.Must(internalapi.AddToScheme(scheme))
utilruntime.Must(versionedapi.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(versionedapi.SchemeGroupVersion))
}
@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -1,7 +1,7 @@
// +build !ignore_autogenerated

/*
Copyright 2018 The Kubernetes Authors.
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
11
vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/config.go
generated
vendored
11
vendor/k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/config.go
generated
vendored
@ -20,10 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"os"

"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
internalapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
@ -33,14 +30,12 @@ import (
)

var (
groupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme)
scheme = runtime.NewScheme()
codecs = serializer.NewCodecFactory(scheme)
)

func init() {
install.Install(groupFactoryRegistry, registry, scheme)
install.Install(scheme)
}

// LoadConfiguration loads the provided configuration.
4
vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/BUILD
generated
vendored
4
vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/BUILD
generated
vendored
@ -16,10 +16,10 @@ go_test(
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
)
@ -37,8 +37,8 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
77
vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission.go
generated
vendored
77
vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission.go
generated
vendored
@ -21,6 +21,7 @@ import (
"io"

"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apiserver/pkg/admission"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -32,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/features"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

const (
@ -97,6 +97,10 @@ var (
// Admit checks Pods and admits or rejects them. It also resolves the priority of pods based on their PriorityClass.
// Note that pod validation mechanism prevents update of a pod priority.
func (p *priorityPlugin) Admit(a admission.Attributes) error {
if !utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
return nil
}

operation := a.GetOperation()
// Ignore all calls to subresources
if len(a.GetSubresource()) != 0 {
@ -105,7 +109,7 @@ func (p *priorityPlugin) Admit(a admission.Attributes) error {

switch a.GetResource().GroupResource() {
case podResource:
if operation == admission.Create {
if operation == admission.Create || operation == admission.Update {
return p.admitPod(a)
}
return nil
@ -135,6 +139,20 @@ func (p *priorityPlugin) Validate(a admission.Attributes) error {
}
}

// priorityClassPermittedInNamespace returns true if we allow the given priority class name in the
// given namespace. It currently checks that system priorities are created only in the system namespace.
func priorityClassPermittedInNamespace(priorityClassName string, namespace string) bool {
// Only allow system priorities in the system namespace. This is to prevent abuse or incorrect
// usage of these priorities. Pods created at these priorities could preempt system critical
// components.
for _, spc := range scheduling.SystemPriorityClasses() {
if spc.Name == priorityClassName && namespace != metav1.NamespaceSystem {
return false
}
}
return true
}

// admitPod makes sure a new pod does not set spec.Priority field. It also makes sure that the PriorityClassName exists if it is provided and resolves the pod priority from the PriorityClassName.
func (p *priorityPlugin) admitPod(a admission.Attributes) error {
operation := a.GetOperation()
@ -143,18 +161,29 @@ func (p *priorityPlugin) admitPod(a admission.Attributes) error {
return errors.NewBadRequest("resource was marked with kind Pod but was unable to be converted")
}

// Make sure that the client has not set `priority` at the time of pod creation.
if operation == admission.Create && pod.Spec.Priority != nil {
return admission.NewForbidden(a, fmt.Errorf("the integer value of priority must not be provided in pod spec. Priority admission controller populates the value from the given PriorityClass name"))
if operation == admission.Update {
oldPod, ok := a.GetOldObject().(*api.Pod)
if !ok {
return errors.NewBadRequest("resource was marked with kind Pod but was unable to be converted")
}

// This admission plugin set pod.Spec.Priority on create.
// Ensure the existing priority is preserved on update.
// API validation prevents mutations to Priority and PriorityClassName, so any other changes will fail update validation and not be persisted.
if pod.Spec.Priority == nil && oldPod.Spec.Priority != nil {
pod.Spec.Priority = oldPod.Spec.Priority
}
return nil
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {

if operation == admission.Create {
var priority int32
// TODO: @ravig - This is for backwards compatibility to ensure that critical pods with annotations just work fine.
// Remove when no longer needed.
if len(pod.Spec.PriorityClassName) == 0 &&
utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
kubelettypes.IsCritical(a.GetNamespace(), pod.Annotations) {
pod.Spec.PriorityClassName = schedulerapi.SystemClusterCritical
pod.Spec.PriorityClassName = scheduling.SystemClusterCritical
}
if len(pod.Spec.PriorityClassName) == 0 {
var err error
@ -163,22 +192,26 @@ func (p *priorityPlugin) admitPod(a admission.Attributes) error {
return fmt.Errorf("failed to get default priority class: %v", err)
}
} else {
// First try to resolve by system priority classes.
priority, ok = schedulerapi.SystemPriorityClasses[pod.Spec.PriorityClassName]
if !ok {
// Now that we didn't find any system priority, try resolving by user defined priority classes.
pc, err := p.lister.Get(pod.Spec.PriorityClassName)
pcName := pod.Spec.PriorityClassName
if !priorityClassPermittedInNamespace(pcName, a.GetNamespace()) {
return admission.NewForbidden(a, fmt.Errorf("pods with %v priorityClass is not permitted in %v namespace", pcName, a.GetNamespace()))
}

if err != nil {
if errors.IsNotFound(err) {
return admission.NewForbidden(a, fmt.Errorf("no PriorityClass with name %v was found", pod.Spec.PriorityClassName))
}

return fmt.Errorf("failed to get PriorityClass with name %s: %v", pod.Spec.PriorityClassName, err)
// Try resolving the priority class name.
pc, err := p.lister.Get(pod.Spec.PriorityClassName)
if err != nil {
if errors.IsNotFound(err) {
return admission.NewForbidden(a, fmt.Errorf("no PriorityClass with name %v was found", pod.Spec.PriorityClassName))
}

priority = pc.Value
return fmt.Errorf("failed to get PriorityClass with name %s: %v", pod.Spec.PriorityClassName, err)
}

priority = pc.Value
}
// if the pod contained a priority that differs from the one computed from the priority class, error
if pod.Spec.Priority != nil && *pod.Spec.Priority != priority {
return admission.NewForbidden(a, fmt.Errorf("the integer value of priority (%d) must not be provided in pod spec; priority admission controller computed %d from the given PriorityClass name", *pod.Spec.Priority, priority))
}
pod.Spec.Priority = &priority
}
@ -192,12 +225,6 @@ func (p *priorityPlugin) validatePriorityClass(a admission.Attributes) error {
if !ok {
return errors.NewBadRequest("resource was marked with kind PriorityClass but was unable to be converted")
}
if pc.Value > schedulerapi.HighestUserDefinablePriority {
return admission.NewForbidden(a, fmt.Errorf("maximum allowed value of a user defined priority is %v", schedulerapi.HighestUserDefinablePriority))
}
if _, ok := schedulerapi.SystemPriorityClasses[pc.Name]; ok {
return admission.NewForbidden(a, fmt.Errorf("the name of the priority class is a reserved name for system use only: %v", pc.Name))
}
// If the new PriorityClass tries to be the default priority, make sure that no other priority class is marked as default.
if pc.GlobalDefault {
dpc, err := p.getDefaultPriorityClass()
159
vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission_test.go
generated
vendored
159
vendor/k8s.io/kubernetes/plugin/pkg/admission/priority/admission_test.go
generated
vendored
@ -24,13 +24,13 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
utilfeature "k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/features"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

func addPriorityClasses(ctrl *priorityPlugin, priorityClasses []*scheduling.PriorityClass) {
@ -75,58 +75,58 @@ var nondefaultClass1 = &scheduling.PriorityClass{
Description: "Just a test priority class",
}

func TestPriorityClassAdmission(t *testing.T) {
var tooHighPriorityClass = &scheduling.PriorityClass{
TypeMeta: metav1.TypeMeta{
Kind: "PriorityClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: "toohighclass",
},
Value: schedulerapi.HighestUserDefinablePriority + 1,
Description: "Just a test priority class",
}
var systemClusterCritical = &scheduling.PriorityClass{
TypeMeta: metav1.TypeMeta{
Kind: "PriorityClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: scheduling.SystemClusterCritical,
},
Value: scheduling.SystemCriticalPriority,
GlobalDefault: true,
}

func TestPriorityClassAdmission(t *testing.T) {
var systemClass = &scheduling.PriorityClass{
TypeMeta: metav1.TypeMeta{
Kind: "PriorityClass",
},
ObjectMeta: metav1.ObjectMeta{
Name: schedulerapi.SystemClusterCritical,
Name: scheduling.SystemPriorityClassPrefix + "test",
},
Value: schedulerapi.HighestUserDefinablePriority + 1,
Description: "Name conflicts with system priority class names",
Value: scheduling.HighestUserDefinablePriority + 1,
Description: "Name has system critical prefix",
}

tests := []struct {
name string
existingClasses []*scheduling.PriorityClass
newClass *scheduling.PriorityClass
userInfo user.Info
expectError bool
}{
{
"one default class",
[]*scheduling.PriorityClass{},
defaultClass1,
nil,
false,
},
{
"more than one default classes",
[]*scheduling.PriorityClass{defaultClass1},
defaultClass2,
nil,
true,
},
{
"too high PriorityClass value",
[]*scheduling.PriorityClass{},
tooHighPriorityClass,
true,
},
{
"system name conflict",
"system name and value are allowed by admission controller",
[]*scheduling.PriorityClass{},
systemClass,
true,
&user.DefaultInfo{
Name: user.APIServerUser,
},
false,
},
}

@ -146,7 +146,7 @@ func TestPriorityClassAdmission(t *testing.T) {
scheduling.Resource("priorityclasses").WithVersion("version"),
"",
admission.Create,
nil,
test.userInfo,
)
err := ctrl.Validate(attrs)
glog.Infof("Got %v", err)
@ -244,6 +244,7 @@ func TestDefaultPriority(t *testing.T) {
}
}

var zeroPriority = int32(0)
var intPriority = int32(1000)

func TestPodAdmission(t *testing.T) {
@ -314,7 +315,7 @@ func TestPodAdmission(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-w-system-priority",
Namespace: "namespace",
Namespace: metav1.NamespaceSystem,
},
Spec: api.PodSpec{
Containers: []api.Container{
@ -322,14 +323,14 @@ func TestPodAdmission(t *testing.T) {
Name: containerName,
},
},
PriorityClassName: schedulerapi.SystemClusterCritical,
PriorityClassName: scheduling.SystemClusterCritical,
},
},
// pod[5]: mirror Pod with a system priority class name
{
ObjectMeta: metav1.ObjectMeta{
Name: "mirror-pod-w-system-priority",
Namespace: "namespace",
Namespace: metav1.NamespaceSystem,
Annotations: map[string]string{api.MirrorPodAnnotationKey: ""},
},
Spec: api.PodSpec{
@ -374,6 +375,67 @@ func TestPodAdmission(t *testing.T) {
},
},
},
// pod[8]: Pod with a system priority class name in non-system namespace
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-w-system-priority-in-nonsystem-namespace",
Namespace: "non-system-namespace",
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: containerName,
},
},
PriorityClassName: scheduling.SystemClusterCritical,
},
},
// pod[9]: Pod with a priority value that matches the resolved priority
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-w-zero-priority-in-nonsystem-namespace",
Namespace: "non-system-namespace",
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: containerName,
},
},
Priority: &zeroPriority,
},
},
// pod[10]: Pod with a priority value that matches the resolved default priority
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-w-priority-matching-default-priority",
Namespace: "non-system-namespace",
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: containerName,
},
},
Priority: &defaultClass2.Value,
},
},
// pod[11]: Pod with a priority value that matches the resolved priority
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-w-priority-matching-resolved-default-priority",
Namespace: metav1.NamespaceSystem,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: containerName,
},
},
PriorityClassName: systemClusterCritical.Name,
Priority: &systemClusterCritical.Value,
},
},
}
// Enable PodPriority feature gate.
utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=true", features.PodPriority))
@ -419,9 +481,9 @@ func TestPodAdmission(t *testing.T) {
},
{
"pod with a system priority class",
[]*scheduling.PriorityClass{},
[]*scheduling.PriorityClass{systemClusterCritical},
*pods[4],
schedulerapi.SystemCriticalPriority,
scheduling.SystemCriticalPriority,
false,
},
{
@ -440,9 +502,9 @@ func TestPodAdmission(t *testing.T) {
},
{
"mirror pod with system priority class",
[]*scheduling.PriorityClass{},
[]*scheduling.PriorityClass{systemClusterCritical},
*pods[5],
schedulerapi.SystemCriticalPriority,
scheduling.SystemCriticalPriority,
false,
},
{
@ -454,9 +516,37 @@ func TestPodAdmission(t *testing.T) {
},
{
"pod with critical pod annotation",
[]*scheduling.PriorityClass{},
[]*scheduling.PriorityClass{systemClusterCritical},
*pods[7],
schedulerapi.SystemCriticalPriority,
scheduling.SystemCriticalPriority,
false,
},
{
"pod with system critical priority in non-system namespace",
[]*scheduling.PriorityClass{systemClusterCritical},
*pods[8],
scheduling.SystemCriticalPriority,
true,
},
{
"pod with priority that matches computed priority",
[]*scheduling.PriorityClass{nondefaultClass1},
*pods[9],
0,
false,
},
{
"pod with priority that matches default priority",
[]*scheduling.PriorityClass{defaultClass2},
*pods[10],
defaultClass2.Value,
false,
},
{
"pod with priority that matches resolved priority",
[]*scheduling.PriorityClass{systemClusterCritical},
*pods[11],
systemClusterCritical.Value,
false,
},
}
@ -485,8 +575,7 @@ func TestPodAdmission(t *testing.T) {
if !test.expectError {
if err != nil {
t.Errorf("Test %q: unexpected error received: %v", test.name, err)
}
if *test.pod.Spec.Priority != test.expectedPriority {
} else if *test.pod.Spec.Priority != test.expectedPriority {
t.Errorf("Test %q: expected priority is %d, but got %d.", test.name, test.expectedPriority, *test.pod.Spec.Priority)
|
||||
}
|
||||
}
|
||||
|
3
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD
generated
vendored
@ -32,9 +32,8 @@ go_library(
|
||||
"//plugin/pkg/admission/resourcequota/apis/resourcequota/validation:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/hashicorp/golang-lru:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
|
678
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission_test.go
generated
vendored
678
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission_test.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package resourcequota
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
@ -73,6 +74,14 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements
	return pod
}

func validPodWithPriority(name string, numContainers int, resources api.ResourceRequirements, priorityClass string) *api.Pod {
	pod := validPod(name, numContainers, resources)
	if priorityClass != "" {
		pod.Spec.PriorityClassName = priorityClass
	}
	return pod
}

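For orientation (editorial note, not part of the vendored diff), a minimal sketch of how this helper is driven by the scope-based quota tests later in this file; the resource quantities and the priority class name simply mirror those test cases.

	// Illustrative call only: one container requesting 100m CPU / 1Gi memory,
	// tagged with the "cluster-services" priority class as in
	// TestAdmitLimitedScopeWithCoverQuota below.
	pod := validPodWithPriority("allowed-pod", 1,
		getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")),
		"cluster-services")
	_ = pod
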
func validPersistentVolumeClaim(name string, resources api.ResourceRequirements) *api.PersistentVolumeClaim {
|
||||
return &api.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
|
||||
@ -1445,3 +1454,672 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) {
|
||||
t.Fatalf("Expected an error since the quota did not cover cpu")
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdmitLimitedScopeWithQuota verifies if a limited scope is configured the quota must cover the resource.
|
||||
func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
testPod *api.Pod
|
||||
quota *api.ResourceQuota
|
||||
anotherQuota *api.ResourceQuota
|
||||
config *resourcequotaapi.Configuration
|
||||
expErr string
|
||||
}{
|
||||
{
|
||||
description: "Covering quota exists for configured limited scope PriorityClassNameExists.",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "configured limited scope PriorityClassNameExists and limited cpu resource. No covering quota for cpu and pod admit fails.",
|
||||
testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to consume: requests.cpu",
|
||||
},
|
||||
{
|
||||
description: "Covering quota does not exist for configured limited scope PriorityClassNameExists.",
|
||||
testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{PriorityClass Exists []}]",
|
||||
},
|
||||
{
|
||||
description: "Covering quota does not exist for configured limited scope resourceQuotaBestEffort",
|
||||
testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
|
||||
},
|
||||
{
|
||||
description: "Covering quota exist for configured limited scope resourceQuotaBestEffort",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Neither matches pod. Pod allowed",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only BestEffort scope matches pod. Pod admit fails because covering quota is missing for BestEffort scope",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only PriorityClass scope matches pod. Pod admit fails because covering quota is missing for PriorityClass scope",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{PriorityClass In [cluster-services]}]",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Pod admit fails because covering quota is missing for PriorityClass scope and BestEffort scope",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{BestEffort Exists []} {PriorityClass In [cluster-services]}]",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for BestEffort scope. Pod admit fails because covering quota is missing for PriorityClass scope",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{PriorityClass In [cluster-services]}]",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for PriorityClass scope. Pod admit fails because covering quota is missing for BestEffort scope",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
|
||||
},
|
||||
{
|
||||
description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for both the scopes. Pod admit success. No Error",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("5"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
},
|
||||
},
|
||||
},
|
||||
anotherQuota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopeBestEffort,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "Pod allowed with priorityclass if limited scope PriorityClassNameExists not configured.",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "quota fails, though covering quota for configured limited scope PriorityClassNameExists exists.",
|
||||
testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "20Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceMemory: resource.MustParse("10Gi"),
|
||||
},
|
||||
Used: api.ResourceList{
|
||||
api.ResourceMemory: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "forbidden: exceeded quota: quota, requested: memory=20Gi, used: memory=1Gi, limited: memory=10Gi",
|
||||
},
|
||||
{
|
||||
description: "Pod has different priorityclass than configured limited. Covering quota exists for configured limited scope PriorityClassIn.",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn.",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name", "cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "Pod has limited priorityclass. Covering quota does not exist for configured limited scope PriorityClassIn.",
|
||||
testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name", "cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{PriorityClass In [another-priorityclass-name cluster-services]}]",
|
||||
},
|
||||
{
|
||||
description: "From the above test case, just changing pod priority from cluster-services to another-priorityclass-name. expecting no error",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "another-priorityclass-name"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name", "cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
{
|
||||
description: "Pod has limited priorityclass. Covering quota does NOT exists for configured limited scope PriorityClassIn.",
|
||||
testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name", "cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "insufficient quota to match these scopes: [{PriorityClass In [another-priorityclass-name cluster-services]}]",
|
||||
},
|
||||
{
|
||||
description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn through PriorityClassNameExists",
|
||||
testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
|
||||
quota: &api.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
ScopeSelector: &api.ScopeSelector{
|
||||
MatchExpressions: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpExists},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
config: &resourcequotaapi.Configuration{
|
||||
LimitedResources: []resourcequotaapi.LimitedResource{
|
||||
{
|
||||
Resource: "pods",
|
||||
MatchScopes: []api.ScopedResourceSelectorRequirement{
|
||||
{
|
||||
ScopeName: api.ResourceQuotaScopePriorityClass,
|
||||
Operator: api.ScopeSelectorOpIn,
|
||||
Values: []string{"another-priorityclass-name", "cluster-services"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expErr: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
newPod := testCase.testPod
|
||||
config := testCase.config
|
||||
resourceQuota := testCase.quota
|
||||
kubeClient := fake.NewSimpleClientset(resourceQuota)
|
||||
if testCase.anotherQuota != nil {
|
||||
kubeClient = fake.NewSimpleClientset(resourceQuota, testCase.anotherQuota)
|
||||
}
|
||||
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
|
||||
quotaAccessor, _ := newQuotaAccessor()
|
||||
quotaAccessor.client = kubeClient
|
||||
quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister()
|
||||
|
||||
quotaConfiguration := install.NewQuotaConfigurationForAdmission()
|
||||
evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh)
|
||||
|
||||
handler := &QuotaAdmission{
|
||||
Handler: admission.NewHandler(admission.Create, admission.Update),
|
||||
evaluator: evaluator,
|
||||
}
|
||||
indexer.Add(resourceQuota)
|
||||
if testCase.anotherQuota != nil {
|
||||
indexer.Add(testCase.anotherQuota)
|
||||
}
|
||||
err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
|
||||
if testCase.expErr == "" {
|
||||
if err != nil {
|
||||
t.Fatalf("Testcase, %v, failed with unexpected error: %v. ExpErr: %v", testCase.description, err, testCase.expErr)
|
||||
}
|
||||
} else {
|
||||
if !strings.Contains(fmt.Sprintf("%v", err), testCase.expErr) {
|
||||
t.Fatalf("Testcase, %v, failed with unexpected error: %v. ExpErr: %v", testCase.description, err, testCase.expErr)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
1
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD
generated
vendored
@ -15,6 +15,7 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
|
@ -12,9 +12,8 @@ go_library(
|
||||
deps = [
|
||||
"//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library",
|
||||
"//plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apimachinery/registered:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -19,25 +19,15 @@ limitations under the License.
|
||||
package install
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
||||
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
|
||||
resourcequotav1alpha1 "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1"
|
||||
)
|
||||
|
||||
// Install registers the API group and adds types to a scheme
|
||||
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
|
||||
if err := announced.NewGroupMetaFactory(
|
||||
&announced.GroupMetaFactoryArgs{
|
||||
GroupName: resourcequotaapi.GroupName,
|
||||
VersionPreferenceOrder: []string{resourcequotav1alpha1.SchemeGroupVersion.Version},
|
||||
AddInternalObjectsToScheme: resourcequotaapi.AddToScheme,
|
||||
},
|
||||
announced.VersionToSchemeFunc{
|
||||
resourcequotav1alpha1.SchemeGroupVersion.Version: resourcequotav1alpha1.AddToScheme,
|
||||
},
|
||||
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
func Install(scheme *runtime.Scheme) {
|
||||
utilruntime.Must(resourcequotaapi.AddToScheme(scheme))
|
||||
utilruntime.Must(resourcequotav1alpha1.AddToScheme(scheme))
|
||||
utilruntime.Must(scheme.SetVersionPriority(resourcequotav1alpha1.SchemeGroupVersion))
|
||||
}
|
||||
|
17
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go
generated
vendored
17
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go
generated
vendored
@ -16,7 +16,10 @@ limitations under the License.

package resourcequota

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/core"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

@ -54,4 +57,16 @@ type LimitedResource struct {
	// with any storage class, the list would include
	// ".storageclass.storage.k8s.io/requests.storage"
	MatchContains []string

	// For each intercepted request, the quota system will figure out if the input object
	// satisfies a scope which is present in this listing, then
	// quota system will ensure that there is a covering quota. In the
	// absence of a covering quota, the quota system will deny the request.
	// For example, if an administrator wants to globally enforce that
	// a quota must exist to create a pod with "cluster-services" priorityclass
	// the list would include
	// "PriorityClassNameIn=cluster-services"
	// +optional
	// MatchScopes []string `json:"matchScopes,omitempty"`
	MatchScopes []core.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"`
}

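The doc comment above describes MatchScopes only abstractly ("PriorityClassNameIn=cluster-services"). As an editorial illustration (not part of the vendored diff), a minimal sketch of an admission Configuration that limits pods carrying the "cluster-services" priority class could look as follows; it reuses the resourcequotaapi and core ("api") aliases exactly as the admission tests earlier in this diff do, and the variable name is hypothetical.

	// Illustrative sketch only: require a covering quota for pods that use the
	// "cluster-services" priority class (mirrors the configurations built in
	// TestAdmitLimitedScopeWithCoverQuota above).
	cfg := &resourcequotaapi.Configuration{
		LimitedResources: []resourcequotaapi.LimitedResource{
			{
				Resource: "pods",
				MatchScopes: []api.ScopedResourceSelectorRequirement{
					{
						ScopeName: api.ResourceQuotaScopePriorityClass,
						Operator:  api.ScopeSelectorOpIn,
						Values:    []string{"cluster-services"},
					},
				},
			},
		},
	}
	_ = cfg
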
@ -18,7 +18,9 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
|
@ -16,7 +16,10 @@ limitations under the License.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
@ -54,4 +57,13 @@ type LimitedResource struct {
|
||||
// with any storage class, the list would include
|
||||
// ".storageclass.storage.k8s.io/requests.storage"
|
||||
MatchContains []string `json:"matchContains,omitempty"`
|
||||
// For each intercepted request, the quota system will figure out if the input object
|
||||
// satisfies a scope which is present in this listing, then
|
||||
// quota system will ensure that there is a covering quota. In the
|
||||
// absence of a covering quota, the quota system will deny the request.
|
||||
// For example, if an administrator wants to globally enforce that
|
||||
// a quota must exist to create a pod with "cluster-services" priorityclass
|
||||
// the list would include "scopeName=PriorityClass, Operator=In, Value=cluster-services"
|
||||
// +optional
|
||||
MatchScopes []v1.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"`
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -23,8 +23,10 @@ package v1alpha1
|
||||
import (
|
||||
unsafe "unsafe"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
core "k8s.io/kubernetes/pkg/apis/core"
|
||||
resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
|
||||
)
|
||||
|
||||
@ -67,6 +69,7 @@ func autoConvert_v1alpha1_LimitedResource_To_resourcequota_LimitedResource(in *L
|
||||
out.APIGroup = in.APIGroup
|
||||
out.Resource = in.Resource
|
||||
out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains))
|
||||
out.MatchScopes = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -79,6 +82,7 @@ func autoConvert_resourcequota_LimitedResource_To_v1alpha1_LimitedResource(in *r
|
||||
out.APIGroup = in.APIGroup
|
||||
out.Resource = in.Resource
|
||||
out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains))
|
||||
out.MatchScopes = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -21,6 +21,7 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@ -64,6 +65,13 @@ func (in *LimitedResource) DeepCopyInto(out *LimitedResource) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MatchScopes != nil {
|
||||
in, out := &in.MatchScopes, &out.MatchScopes
|
||||
*out = make([]v1.ScopedResourceSelectorRequirement, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -1,7 +1,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
@ -22,6 +22,7 @@ package resourcequota
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
core "k8s.io/kubernetes/pkg/apis/core"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
@ -64,6 +65,13 @@ func (in *LimitedResource) DeepCopyInto(out *LimitedResource) {
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MatchScopes != nil {
|
||||
in, out := &in.MatchScopes, &out.MatchScopes
|
||||
*out = make([]core.ScopedResourceSelectorRequirement, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
11
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/config.go
generated
vendored
11
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/config.go
generated
vendored
@ -20,10 +20,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apimachinery/announced"
|
||||
"k8s.io/apimachinery/pkg/apimachinery/registered"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota"
|
||||
@ -32,14 +29,12 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
groupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
|
||||
registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))
|
||||
scheme = runtime.NewScheme()
|
||||
codecs = serializer.NewCodecFactory(scheme)
|
||||
scheme = runtime.NewScheme()
|
||||
codecs = serializer.NewCodecFactory(scheme)
|
||||
)
|
||||
|
||||
func init() {
|
||||
install.Install(groupFactoryRegistry, registry, scheme)
|
||||
install.Install(scheme)
|
||||
}
|
||||
|
||||
// LoadConfiguration loads the provided configuration.
|
||||
|
82
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go
generated
vendored
82
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go
generated
vendored
@ -25,7 +25,9 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -366,6 +368,21 @@ func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaap
	return result
}

func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]api.ScopedResourceSelectorRequirement, error) {
	scopes := []api.ScopedResourceSelectorRequirement{}
	for _, limitedResource := range limitedResources {
		matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes)
		if err != nil {
			glog.Errorf("Error while matching limited Scopes: %v", err)
			return []api.ScopedResourceSelectorRequirement{}, err
		}
		for _, scope := range matched {
			scopes = append(scopes, scope)
		}
	}
	return scopes, nil
}

// checkRequest verifies that the request does not exceed any quota constraint. it returns a copy of quotas not yet persisted
// that capture what the usage would be if the request succeeded. It return an error if there is insufficient quota to satisfy the request
func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) {
@ -382,6 +399,12 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	// if we have limited resources enabled for this resource, always calculate usage
	inputObject := a.GetObject()

	// Check if object matches AdmissionConfiguration matchScopes
	limitedScopes, err := getMatchedLimitedScopes(evaluator, inputObject, e.config.LimitedResources)
	if err != nil {
		return quotas, nil
	}

	// determine the set of resource names that must exist in a covering quota
	limitedResourceNames := []api.ResourceName{}
	limitedResources := filterLimitedResourcesByGroupResource(e.config.LimitedResources, a.GetResource().GroupResource())
@ -403,10 +426,21 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	// this is needed to know if we have satisfied any constraints where consumption
	// was limited by default.
	restrictedResourcesSet := sets.String{}
	restrictedScopes := []api.ScopedResourceSelectorRequirement{}
	for i := range quotas {
		resourceQuota := quotas[i]
		scopeSelectors := getScopeSelectorsFromQuota(resourceQuota)
		localRestrictedScopes, err := evaluator.MatchingScopes(inputObject, scopeSelectors)
		if err != nil {
			return nil, fmt.Errorf("error matching scopes of quota %s, err: %v", resourceQuota.Name, err)
		}
		for _, scope := range localRestrictedScopes {
			restrictedScopes = append(restrictedScopes, scope)
		}

		match, err := evaluator.Matches(&resourceQuota, inputObject)
		if err != nil {
			glog.Errorf("Error occurred while matching resource quota, %v, against input object. Err: %v", resourceQuota, err)
			return quotas, err
		}
		if !match {
@ -431,7 +465,18 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	// if not, we reject the request.
	hasNoCoveringQuota := limitedResourceNamesSet.Difference(restrictedResourcesSet)
	if len(hasNoCoveringQuota) > 0 {
		return quotas, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ","))
		return quotas, admission.NewForbidden(a, fmt.Errorf("insufficient quota to consume: %v", strings.Join(hasNoCoveringQuota.List(), ",")))
	}

	// verify that for every scope that had limited access enabled
	// that there was a corresponding quota that covered it.
	// if not, we reject the request.
	scopesHasNoCoveringQuota, err := evaluator.UncoveredQuotaScopes(limitedScopes, restrictedScopes)
	if err != nil {
		return quotas, err
	}
	if len(scopesHasNoCoveringQuota) > 0 {
		return quotas, fmt.Errorf("insufficient quota to match these scopes: %v", scopesHasNoCoveringQuota)
	}

	if len(interestingQuotaIndexes) == 0 {
@ -515,6 +560,21 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At
	return outQuotas, nil
}

func getScopeSelectorsFromQuota(quota api.ResourceQuota) []api.ScopedResourceSelectorRequirement {
	selectors := []api.ScopedResourceSelectorRequirement{}
	for _, scope := range quota.Spec.Scopes {
		selectors = append(selectors, api.ScopedResourceSelectorRequirement{
			ScopeName: scope,
			Operator:  api.ScopeSelectorOpExists})
	}
	if quota.Spec.ScopeSelector != nil {
		for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions {
			selectors = append(selectors, scopeSelector)
		}
	}
	return selectors
}

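As an editorial aside (not part of the vendored diff), the following sketch shows what the flattening above produces for a quota that sets both the legacy Scopes field and the newer ScopeSelector; the types and constants are the same core API ("api") ones used throughout this file, and the variable name is hypothetical.

	// Illustrative sketch only: a quota declaring both Scopes and ScopeSelector.
	q := api.ResourceQuota{
		Spec: api.ResourceQuotaSpec{
			Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
			ScopeSelector: &api.ScopeSelector{
				MatchExpressions: []api.ScopedResourceSelectorRequirement{
					{
						ScopeName: api.ResourceQuotaScopePriorityClass,
						Operator:  api.ScopeSelectorOpIn,
						Values:    []string{"cluster-services"},
					},
				},
			},
		},
	}
	// getScopeSelectorsFromQuota(q) flattens both into one selector list, roughly
	//   [{BestEffort Exists []} {PriorityClass In [cluster-services]}]
	// which is the same shape printed by the "insufficient quota to match these
	// scopes" errors exercised in the admission tests earlier in this diff.
	_ = q
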
func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
|
||||
e.init.Do(func() {
|
||||
go e.run()
|
||||
@ -531,7 +591,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
|
||||
evaluator := e.registry.Get(gr)
|
||||
if evaluator == nil {
|
||||
// create an object count evaluator if no evaluator previously registered
|
||||
// note, we do not need aggregate usage here, so we pass a nil infomer func
|
||||
// note, we do not need aggregate usage here, so we pass a nil informer func
|
||||
evaluator = generic.NewObjectCountEvaluator(false, gr, nil, "")
|
||||
e.registry.Add(evaluator)
|
||||
glog.Infof("quota admission added evaluator for: %s", gr)
|
||||
@ -549,7 +609,7 @@ func (e *quotaEvaluator) Evaluate(a admission.Attributes) error {
|
||||
select {
|
||||
case <-waiter.finished:
|
||||
case <-time.After(10 * time.Second):
|
||||
return fmt.Errorf("timeout")
|
||||
return apierrors.NewInternalError(fmt.Errorf("resource quota evaluates timeout"))
|
||||
}
|
||||
|
||||
return waiter.result
|
||||
@ -582,6 +642,11 @@ func (e *quotaEvaluator) completeWork(ns string) {
|
||||
e.inProgress.Delete(ns)
|
||||
}
|
||||
|
||||
// getWork returns a namespace, a list of work items in that
|
||||
// namespace, and a shutdown boolean. If not shutdown then the return
|
||||
// must eventually be followed by a call on completeWork for the
|
||||
// returned namespace (regardless of whether the work item list is
|
||||
// empty).
|
||||
func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) {
|
||||
uncastNS, shutdown := e.queue.Get()
|
||||
if shutdown {
|
||||
@ -598,15 +663,8 @@ func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) {
|
||||
work := e.work[ns]
|
||||
delete(e.work, ns)
|
||||
delete(e.dirtyWork, ns)
|
||||
|
||||
if len(work) != 0 {
|
||||
e.inProgress.Insert(ns)
|
||||
return ns, work, false
|
||||
}
|
||||
|
||||
e.queue.Done(ns)
|
||||
e.inProgress.Delete(ns)
|
||||
return ns, []*admissionWaiter{}, false
|
||||
e.inProgress.Insert(ns)
|
||||
return ns, work, false
|
||||
}
|
||||
|
||||
// prettyPrint formats a resource list for usage in errors
|
||||
|
5
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD
generated
vendored
5
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD
generated
vendored
@ -15,7 +15,7 @@ go_library(
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/apis/policy:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
|
||||
"//pkg/client/listers/extensions/internalversion:go_default_library",
|
||||
"//pkg/client/listers/policy/internalversion:go_default_library",
|
||||
"//pkg/kubeapiserver/admission:go_default_library",
|
||||
"//pkg/registry/rbac:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy:go_default_library",
|
||||
@ -39,8 +39,6 @@ go_test(
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/apis/policy:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
@ -48,6 +46,7 @@ go_test(
|
||||
"//pkg/security/podsecuritypolicy:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//pkg/util/pointer:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
|
19
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go
generated
vendored
19
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go
generated
vendored
@ -35,7 +35,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/apis/policy"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
|
||||
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion"
|
||||
policylisters "k8s.io/kubernetes/pkg/client/listers/policy/internalversion"
|
||||
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
|
||||
rbacregistry "k8s.io/kubernetes/pkg/registry/rbac"
|
||||
psp "k8s.io/kubernetes/pkg/security/podsecuritypolicy"
|
||||
@ -61,7 +61,7 @@ type PodSecurityPolicyPlugin struct {
|
||||
strategyFactory psp.StrategyFactory
|
||||
failOnNoPolicies bool
|
||||
authz authorizer.Authorizer
|
||||
lister extensionslisters.PodSecurityPolicyLister
|
||||
lister policylisters.PodSecurityPolicyLister
|
||||
}
|
||||
|
||||
// SetAuthorizer sets the authorizer.
|
||||
@ -84,6 +84,7 @@ var _ admission.MutationInterface = &PodSecurityPolicyPlugin{}
|
||||
var _ admission.ValidationInterface = &PodSecurityPolicyPlugin{}
|
||||
var _ genericadmissioninit.WantsAuthorizer = &PodSecurityPolicyPlugin{}
|
||||
var _ kubeapiserveradmission.WantsInternalKubeInformerFactory = &PodSecurityPolicyPlugin{}
|
||||
var auditKeyPrefix = strings.ToLower(PluginName) + "." + policy.GroupName + ".k8s.io"
|
||||
|
||||
// newPlugin creates a new PSP admission plugin.
|
||||
func newPlugin(strategyFactory psp.StrategyFactory, failOnNoPolicies bool) *PodSecurityPolicyPlugin {
|
||||
@ -95,7 +96,7 @@ func newPlugin(strategyFactory psp.StrategyFactory, failOnNoPolicies bool) *PodS
|
||||
}
|
||||
|
||||
func (a *PodSecurityPolicyPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) {
|
||||
podSecurityPolicyInformer := f.Extensions().InternalVersion().PodSecurityPolicies()
|
||||
podSecurityPolicyInformer := f.Policy().InternalVersion().PodSecurityPolicies()
|
||||
a.lister = podSecurityPolicyInformer.Lister()
|
||||
a.SetReadyFunc(podSecurityPolicyInformer.Informer().HasSynced)
|
||||
}
|
||||
@ -136,6 +137,10 @@ func (c *PodSecurityPolicyPlugin) Admit(a admission.Attributes) error {
|
||||
pod.ObjectMeta.Annotations = map[string]string{}
|
||||
}
|
||||
pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation] = pspName
|
||||
key := auditKeyPrefix + "/" + "admit-policy"
|
||||
if err := a.AddAnnotation(key, pspName); err != nil {
|
||||
glog.Warningf("failed to set admission audit annotation %s to %s: %v", key, pspName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -154,11 +159,15 @@ func (c *PodSecurityPolicyPlugin) Validate(a admission.Attributes) error {
|
||||
pod := a.GetObject().(*api.Pod)
|
||||
|
||||
// compute the context. Mutation is not allowed. ValidatedPSPAnnotation is used as a hint to gain same speed-up.
|
||||
allowedPod, _, validationErrs, err := c.computeSecurityContext(a, pod, false, pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation])
|
||||
allowedPod, pspName, validationErrs, err := c.computeSecurityContext(a, pod, false, pod.ObjectMeta.Annotations[psputil.ValidatedPSPAnnotation])
|
||||
if err != nil {
|
||||
return admission.NewForbidden(a, err)
|
||||
}
|
||||
if apiequality.Semantic.DeepEqual(pod, allowedPod) {
|
||||
key := auditKeyPrefix + "/" + "validate-policy"
|
||||
if err := a.AddAnnotation(key, pspName); err != nil {
|
||||
glog.Warningf("failed to set admission audit annotation %s to %s: %v", key, pspName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -329,7 +338,7 @@ func assignSecurityContext(provider psp.Provider, pod *api.Pod, fldPath *field.P
|
||||
}
|
||||
|
||||
// createProvidersFromPolicies creates providers from the constraints supplied.
|
||||
func (c *PodSecurityPolicyPlugin) createProvidersFromPolicies(psps []*extensions.PodSecurityPolicy, namespace string) ([]psp.Provider, []error) {
|
||||
func (c *PodSecurityPolicyPlugin) createProvidersFromPolicies(psps []*policy.PodSecurityPolicy, namespace string) ([]psp.Provider, []error) {
|
||||
var (
|
||||
// collected providers
|
||||
providers []psp.Provider
|
||||
|
666 vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission_test.go generated vendored
File diff suppressed because it is too large. Load Diff

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/OWNERS generated vendored
@ -1,7 +1,9 @@
approvers:
- liggitt
- deads2k
- mikedanese
reviewers:
- liggitt
- deads2k
- mikedanese
- enj
9
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go
generated
vendored
9
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go
generated
vendored
@ -208,6 +208,15 @@ func (s *serviceAccount) Validate(a admission.Attributes) (err error) {
|
||||
if hasSecrets {
|
||||
return admission.NewForbidden(a, fmt.Errorf("a mirror pod may not reference secrets"))
|
||||
}
|
||||
for _, v := range pod.Spec.Volumes {
|
||||
if proj := v.Projected; proj != nil {
|
||||
for _, projSource := range proj.Sources {
|
||||
if projSource.ServiceAccountToken != nil {
|
||||
return admission.NewForbidden(a, fmt.Errorf("a mirror pod may not use ServiceAccountToken volume projections"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
25
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go
generated
vendored
25
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go
generated
vendored
@ -138,6 +138,31 @@ func TestRejectsMirrorPodWithSecretVolumes(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestRejectsMirrorPodWithServiceAccountTokenVolumeProjections(t *testing.T) {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
kubelet.ConfigMirrorAnnotationKey: "true",
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Volumes: []api.Volume{
|
||||
{VolumeSource: api.VolumeSource{
|
||||
Projected: &api.ProjectedVolumeSource{
|
||||
Sources: []api.VolumeProjection{{ServiceAccountToken: &api.ServiceAccountTokenProjection{}}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
attrs := admission.NewAttributesRecord(pod, nil, api.Kind("Pod").WithVersion("version"), "myns", "myname", api.Resource("pods").WithVersion("version"), "", admission.Create, nil)
|
||||
err := NewServiceAccount().Admit(attrs)
|
||||
if err == nil {
|
||||
t.Errorf("Expected a mirror pod to be prevented from referencing a ServiceAccountToken volume projection")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAssignsDefaultServiceAccountAndToleratesMissingAPIToken(t *testing.T) {
|
||||
ns := "myns"
|
||||
|
||||
|
@ -12,7 +12,7 @@ go_library(
|
||||
"admission.go",
|
||||
"doc.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label",
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/label",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
@ -33,12 +33,15 @@ import (
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
const PluginName = "PersistentVolumeLabel"
|
||||
const (
|
||||
// PluginName is the name of persistent volume label admission plugin
|
||||
PluginName = "PersistentVolumeLabel"
|
||||
)
|
||||
|
||||
// Register registers a plugin
|
||||
func Register(plugins *admission.Plugins) {
|
||||
plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) {
|
||||
persistentVolumeLabelAdmission := NewPersistentVolumeLabel()
|
||||
persistentVolumeLabelAdmission := newPersistentVolumeLabel()
|
||||
return persistentVolumeLabelAdmission, nil
|
||||
})
|
||||
}
|
||||
@ -57,11 +60,11 @@ type persistentVolumeLabel struct {
|
||||
var _ admission.MutationInterface = &persistentVolumeLabel{}
|
||||
var _ kubeapiserveradmission.WantsCloudConfig = &persistentVolumeLabel{}
|
||||
|
||||
// NewPersistentVolumeLabel returns an admission.Interface implementation which adds labels to PersistentVolume CREATE requests,
|
||||
// newPersistentVolumeLabel returns an admission.Interface implementation which adds labels to PersistentVolume CREATE requests,
|
||||
// based on the labels provided by the underlying cloud provider.
|
||||
//
|
||||
// As a side effect, the cloud provider may block invalid or non-existent volumes.
|
||||
func NewPersistentVolumeLabel() *persistentVolumeLabel {
|
||||
func newPersistentVolumeLabel() *persistentVolumeLabel {
|
||||
// DEPRECATED: cloud-controller-manager will now start NewPersistentVolumeLabelController
|
||||
// which does exactly what this admission controller used to do. So once GCE and AWS can
|
||||
// run externally, we can remove this admission controller.
|
@ -36,7 +36,7 @@ type mockVolumes struct {
|
||||
|
||||
var _ aws.Volumes = &mockVolumes{}
|
||||
|
||||
func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName, readOnly bool) (string, error) {
|
||||
func (v *mockVolumes) AttachDisk(diskName aws.KubernetesVolumeID, nodeName types.NodeName) (string, error) {
|
||||
return "", fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
@ -56,19 +56,19 @@ func (v *mockVolumes) GetVolumeLabels(volumeName aws.KubernetesVolumeID) (map[st
|
||||
return v.volumeLabels, v.volumeLabelsError
|
||||
}
|
||||
|
||||
func (c *mockVolumes) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
|
||||
func (v *mockVolumes) GetDiskPath(volumeName aws.KubernetesVolumeID) (string, error) {
|
||||
return "", fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (c *mockVolumes) DiskIsAttached(volumeName aws.KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
|
||||
func (v *mockVolumes) DiskIsAttached(volumeName aws.KubernetesVolumeID, nodeName types.NodeName) (bool, error) {
|
||||
return false, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (c *mockVolumes) DisksAreAttached(nodeDisks map[types.NodeName][]aws.KubernetesVolumeID) (map[types.NodeName]map[aws.KubernetesVolumeID]bool, error) {
|
||||
func (v *mockVolumes) DisksAreAttached(nodeDisks map[types.NodeName][]aws.KubernetesVolumeID) (map[types.NodeName]map[aws.KubernetesVolumeID]bool, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (c *mockVolumes) ResizeDisk(
|
||||
func (v *mockVolumes) ResizeDisk(
|
||||
diskName aws.KubernetesVolumeID,
|
||||
oldSize resource.Quantity,
|
||||
newSize resource.Quantity) (resource.Quantity, error) {
|
||||
@ -85,7 +85,7 @@ func mockVolumeLabels(labels map[string]string) *mockVolumes {
|
||||
|
||||
// TestAdmission
|
||||
func TestAdmission(t *testing.T) {
|
||||
pvHandler := NewPersistentVolumeLabel()
|
||||
pvHandler := newPersistentVolumeLabel()
|
||||
handler := admission.NewChainHandler(pvHandler)
|
||||
ignoredPV := api.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "noncloud", Namespace: "myns"},
|
@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// labels created persistent volumes with zone information
|
||||
// Package label created persistent volumes with zone information
|
||||
// as provided by the cloud provider
|
||||
package label // import "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label"
|
||||
package label // import "k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/label"
|
@ -26,7 +26,7 @@ go_test(
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["admission.go"],
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize",
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/admission/storage/persistentvolume/resize",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/helper:go_default_library",
|
@ -149,7 +149,7 @@ func (pvcr *persistentVolumeClaimResize) allowResize(pvc, oldPvc *api.Persistent
|
||||
|
||||
// checkVolumePlugin checks whether the volume plugin supports resize
|
||||
func (pvcr *persistentVolumeClaimResize) checkVolumePlugin(pv *api.PersistentVolume) bool {
|
||||
if pv.Spec.Glusterfs != nil || pv.Spec.Cinder != nil || pv.Spec.RBD != nil {
|
||||
if pv.Spec.Glusterfs != nil || pv.Spec.Cinder != nil || pv.Spec.RBD != nil || pv.Spec.PortworxVolume != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
@ -164,5 +164,9 @@ func (pvcr *persistentVolumeClaimResize) checkVolumePlugin(pv *api.PersistentVol
|
||||
if pv.Spec.AzureFile != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if pv.Spec.AzureDisk != nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
2
vendor/k8s.io/kubernetes/plugin/pkg/auth/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/plugin/pkg/auth/OWNERS
generated
vendored
@ -2,9 +2,11 @@ approvers:
|
||||
- erictune
|
||||
- liggitt
|
||||
- deads2k
|
||||
- mikedanese
|
||||
reviewers:
|
||||
- erictune
|
||||
- liggitt
|
||||
- deads2k
|
||||
- ericchiang
|
||||
- enj
|
||||
- mikedanese
|
||||
|
19 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap/bootstrap.go generated vendored
@ -152,13 +152,12 @@ func (t *TokenAuthenticator) AuthenticateToken(token string) (user.Info, bool, e

// Copied from k8s.io/client-go/tools/bootstrap/token/api
func getSecretString(secret *api.Secret, key string) string {
if secret.Data == nil {
data, ok := secret.Data[key]
if !ok {
return ""
}
if val, ok := secret.Data[key]; ok {
return string(val)
}
return ""

return string(data)
}

// Copied from k8s.io/client-go/tools/bootstrap/token/api
@ -167,11 +166,13 @@ func isSecretExpired(secret *api.Secret) bool {
if len(expiration) > 0 {
expTime, err2 := time.Parse(time.RFC3339, expiration)
if err2 != nil {
tokenErrorf(secret, "has unparsable expiration time (%s). Treating as expired.", expiration)
glog.V(3).Infof("Unparseable expiration time (%s) in %s/%s Secret: %v. Treating as expired.",
expiration, secret.Namespace, secret.Name, err2)
return true
}
if time.Now().After(expTime) {
tokenErrorf(secret, "has expired.", expiration)
glog.V(3).Infof("Expired bootstrap token in %s/%s Secret: %v",
secret.Namespace, secret.Name, expiration)
return true
}
}
@ -181,8 +182,10 @@ func isSecretExpired(secret *api.Secret) bool {
// Copied from kubernetes/cmd/kubeadm/app/util/token

var (
// tokenRegexpString defines id.secret regular expression pattern
tokenRegexpString = "^([a-z0-9]{6})\\.([a-z0-9]{16})$"
tokenRegexp = regexp.MustCompile(tokenRegexpString)
// tokenRegexp is a compiled regular expression of TokenRegexpString
tokenRegexp = regexp.MustCompile(tokenRegexpString)
)

// parseToken tries and parse a valid token from a string.
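For reference, the id.secret token format enforced by tokenRegexpString in the hunk above can be exercised on its own. The regular expression below is copied from that hunk; the package wrapper and sample tokens are purely illustrative:

package main

import (
	"fmt"
	"regexp"
)

// tokenRegexp mirrors tokenRegexpString from bootstrap.go above:
// six lowercase alphanumerics, a dot, then sixteen lowercase alphanumerics.
var tokenRegexp = regexp.MustCompile(`^([a-z0-9]{6})\.([a-z0-9]{16})$`)

func main() {
	fmt.Println(tokenRegexp.MatchString("abcdef.0123456789abcdef")) // true
	fmt.Println(tokenRegexp.MatchString("abcdef.tooshort"))         // false: secret part is not 16 characters
}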
11
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/BUILD
generated
vendored
11
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/BUILD
generated
vendored
@ -8,15 +8,21 @@ load(
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["node_authorizer_test.go"],
|
||||
srcs = [
|
||||
"graph_test.go",
|
||||
"intset_test.go",
|
||||
"node_authorizer_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/auth/nodeidentifier:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
@ -28,6 +34,7 @@ go_library(
|
||||
srcs = [
|
||||
"graph.go",
|
||||
"graph_populator.go",
|
||||
"intset.go",
|
||||
"node_authorizer.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node",
|
||||
@ -35,7 +42,6 @@ go_library(
|
||||
"//pkg/api/persistentvolume:go_default_library",
|
||||
"//pkg/api/pod:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/rbac:go_default_library",
|
||||
"//pkg/apis/storage:go_default_library",
|
||||
"//pkg/auth/nodeidentifier:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
|
||||
@ -45,6 +51,7 @@ go_library(
|
||||
"//third_party/forked/gonum/graph/simple:go_default_library",
|
||||
"//third_party/forked/gonum/graph/traverse:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/rbac/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/storage/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
|
||||
|
158 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph.go generated vendored
@ -80,6 +80,11 @@ type Graph struct {
graph *simple.DirectedAcyclicGraph
// vertices is a map of type -> namespace -> name -> vertex
vertices map[vertexType]namespaceVertexMapping

// destinationEdgeIndex is a map of vertex -> set of destination IDs
destinationEdgeIndex map[int]*intSet
// destinationEdgeThreshold is the minimum number of distinct destination IDs at which to maintain an index
destinationEdgeThreshold int
}

// namespaceVertexMapping is a map of namespace -> name -> vertex
@ -92,6 +97,11 @@ func NewGraph() *Graph {
return &Graph{
vertices: map[vertexType]namespaceVertexMapping{},
graph: simple.NewDirectedAcyclicGraph(0, 0),

destinationEdgeIndex: map[int]*intSet{},
// experimentally determined to be the point at which iteration adds an order of magnitude to the authz check.
// since maintaining indexes costs time/memory while processing graph changes, we don't want to make this too low.
destinationEdgeThreshold: 200,
}
}

@ -165,6 +175,7 @@ func (g *Graph) deleteVertex_locked(vertexType vertexType, namespace, name strin

// find existing neighbors with a single edge (meaning we are their only neighbor)
neighborsToRemove := []graph.Node{}
neighborsToRecompute := []graph.Node{}
g.graph.VisitFrom(vertex, func(neighbor graph.Node) bool {
// this downstream neighbor has only one edge (which must be from us), so remove them as well
if g.graph.Degree(neighbor) == 1 {
@ -173,28 +184,115 @@ func (g *Graph) deleteVertex_locked(vertexType vertexType, namespace, name strin
return true
})
g.graph.VisitTo(vertex, func(neighbor graph.Node) bool {
// this upstream neighbor has only one edge (which must be to us), so remove them as well
if g.graph.Degree(neighbor) == 1 {
// this upstream neighbor has only one edge (which must be to us), so remove them as well
neighborsToRemove = append(neighborsToRemove, neighbor)
} else {
// recompute the destination edge index on this neighbor
neighborsToRecompute = append(neighborsToRecompute, neighbor)
}
return true
})

// remove the vertex
g.graph.RemoveNode(vertex)
delete(g.vertices[vertexType][namespace], name)
if len(g.vertices[vertexType][namespace]) == 0 {
delete(g.vertices[vertexType], namespace)
}
g.removeVertex_locked(vertex)

// remove neighbors that are now edgeless
for _, neighbor := range neighborsToRemove {
g.graph.RemoveNode(neighbor)
n := neighbor.(*namedVertex)
delete(g.vertices[n.vertexType][n.namespace], n.name)
if len(g.vertices[n.vertexType][n.namespace]) == 0 {
delete(g.vertices[n.vertexType], n.namespace)
g.removeVertex_locked(neighbor.(*namedVertex))
}

// recompute destination indexes for neighbors that dropped outbound edges
for _, neighbor := range neighborsToRecompute {
g.recomputeDestinationIndex_locked(neighbor)
}
}

// must be called under write lock
// deletes edges from a given vertex type to a specific vertex
// will delete each orphaned "from" vertex, but will never delete the "to" vertex
func (g *Graph) deleteEdges_locked(fromType, toType vertexType, toNamespace, toName string) {
// get the "to" side
toVert, exists := g.getVertex_rlocked(toType, toNamespace, toName)
if !exists {
return
}

// delete all edges between vertices of fromType and toVert
neighborsToRemove := []*namedVertex{}
neighborsToRecompute := []*namedVertex{}
g.graph.VisitTo(toVert, func(from graph.Node) bool {
fromVert := from.(*namedVertex)
if fromVert.vertexType != fromType {
return true
}
// remove the edge
g.graph.RemoveEdge(simple.Edge{F: fromVert, T: toVert})
// track vertexes that changed edges
if g.graph.Degree(fromVert) == 0 {
neighborsToRemove = append(neighborsToRemove, fromVert)
} else {
neighborsToRecompute = append(neighborsToRecompute, fromVert)
}
return true
})

// clean up orphaned verts
for _, v := range neighborsToRemove {
g.removeVertex_locked(v)
}

// recompute destination indexes for neighbors that dropped outbound edges
for _, v := range neighborsToRecompute {
g.recomputeDestinationIndex_locked(v)
}
}

// must be called under write lock
// removeVertex_locked removes the specified vertex from the graph and from the maintained indices.
// It does nothing to indexes of neighbor vertices.
func (g *Graph) removeVertex_locked(v *namedVertex) {
g.graph.RemoveNode(v)
delete(g.destinationEdgeIndex, v.ID())
delete(g.vertices[v.vertexType][v.namespace], v.name)
if len(g.vertices[v.vertexType][v.namespace]) == 0 {
delete(g.vertices[v.vertexType], v.namespace)
}
}

// must be called under write lock
// recomputeDestinationIndex_locked recomputes the index of destination ids for the specified vertex
func (g *Graph) recomputeDestinationIndex_locked(n graph.Node) {
// don't maintain indices for nodes with few edges
edgeCount := g.graph.Degree(n)
if edgeCount < g.destinationEdgeThreshold {
delete(g.destinationEdgeIndex, n.ID())
return
}

// get or create the index
index := g.destinationEdgeIndex[n.ID()]
if index == nil {
index = newIntSet()
} else {
index.startNewGeneration()
}

// populate the index
g.graph.VisitFrom(n, func(dest graph.Node) bool {
if destinationEdge, ok := g.graph.EdgeBetween(n, dest).(*destinationEdge); ok {
index.mark(destinationEdge.DestinationID())
}
return true
})

// remove existing items no longer in the list
index.sweep()

if len(index.members) < g.destinationEdgeThreshold {
delete(g.destinationEdgeIndex, n.ID())
} else {
g.destinationEdgeIndex[n.ID()] = index
}
}
@ -221,22 +319,30 @@ func (g *Graph) AddPod(pod *api.Pod) {
|
||||
//
|
||||
// ref https://github.com/kubernetes/kubernetes/issues/58790
|
||||
if len(pod.Spec.ServiceAccountName) > 0 {
|
||||
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(serviceAccountVertexType, pod.Namespace, pod.Spec.ServiceAccountName), podVertex, nodeVertex))
|
||||
serviceAccountVertex := g.getOrCreateVertex_locked(serviceAccountVertexType, pod.Namespace, pod.Spec.ServiceAccountName)
|
||||
g.graph.SetEdge(newDestinationEdge(serviceAccountVertex, podVertex, nodeVertex))
|
||||
g.recomputeDestinationIndex_locked(serviceAccountVertex)
|
||||
}
|
||||
|
||||
podutil.VisitPodSecretNames(pod, func(secret string) bool {
|
||||
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(secretVertexType, pod.Namespace, secret), podVertex, nodeVertex))
|
||||
secretVertex := g.getOrCreateVertex_locked(secretVertexType, pod.Namespace, secret)
|
||||
g.graph.SetEdge(newDestinationEdge(secretVertex, podVertex, nodeVertex))
|
||||
g.recomputeDestinationIndex_locked(secretVertex)
|
||||
return true
|
||||
})
|
||||
|
||||
podutil.VisitPodConfigmapNames(pod, func(configmap string) bool {
|
||||
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(configMapVertexType, pod.Namespace, configmap), podVertex, nodeVertex))
|
||||
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, pod.Namespace, configmap)
|
||||
g.graph.SetEdge(newDestinationEdge(configmapVertex, podVertex, nodeVertex))
|
||||
g.recomputeDestinationIndex_locked(configmapVertex)
|
||||
return true
|
||||
})
|
||||
|
||||
for _, v := range pod.Spec.Volumes {
|
||||
if v.PersistentVolumeClaim != nil {
|
||||
g.graph.SetEdge(newDestinationEdge(g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, v.PersistentVolumeClaim.ClaimName), podVertex, nodeVertex))
|
||||
pvcVertex := g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, v.PersistentVolumeClaim.ClaimName)
|
||||
g.graph.SetEdge(newDestinationEdge(pvcVertex, podVertex, nodeVertex))
|
||||
g.recomputeDestinationIndex_locked(pvcVertex)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -301,3 +407,25 @@ func (g *Graph) DeleteVolumeAttachment(name string) {
|
||||
defer g.lock.Unlock()
|
||||
g.deleteVertex_locked(vaVertexType, "", name)
|
||||
}
|
||||
|
||||
// SetNodeConfigMap sets up edges for the Node.Spec.ConfigSource.ConfigMap relationship:
|
||||
//
|
||||
// configmap -> node
|
||||
func (g *Graph) SetNodeConfigMap(nodeName, configMapName, configMapNamespace string) {
|
||||
g.lock.Lock()
|
||||
defer g.lock.Unlock()
|
||||
|
||||
// TODO(mtaufen): ensure len(nodeName) > 0 in all cases (would sure be nice to have a dependently-typed language here...)
|
||||
|
||||
// clear edges configmaps -> node where the destination is the current node *only*
|
||||
// at present, a node can only have one *direct* configmap reference at a time
|
||||
g.deleteEdges_locked(configMapVertexType, nodeVertexType, "", nodeName)
|
||||
|
||||
// establish new edges if we have a real ConfigMap to reference
|
||||
if len(configMapName) > 0 && len(configMapNamespace) > 0 {
|
||||
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, configMapNamespace, configMapName)
|
||||
nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", nodeName)
|
||||
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex, nodeVertex))
|
||||
}
|
||||
|
||||
}
|
||||
|
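The destinationEdgeIndex added above is what lets the node authorizer short-circuit its reachability check: once a vertex (for example, a secret shared by many pods) accumulates at least destinationEdgeThreshold distinct destination edges, "is this object related to node X?" becomes a set lookup instead of a depth-first traversal. A minimal sketch of that fast path, assuming the intSet type and index layout from the hunks above (the helper name is hypothetical; the real check sits at the top of hasPathFrom in node_authorizer.go later in this diff):

// hasFastPath reports whether startID already has an indexed destination edge to nodeID.
// A nil *intSet is safe to query here: intSet.has returns false on a nil receiver, which
// simply means no index is maintained for this vertex and the caller falls back to traversal.
func hasFastPath(destinationEdgeIndex map[int]*intSet, startID, nodeID int) bool {
	return destinationEdgeIndex[startID].has(nodeID)
}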
66
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go
generated
vendored
66
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_populator.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/golang/glog"
|
||||
|
||||
storagev1beta1 "k8s.io/api/storage/v1beta1"
|
||||
@ -34,6 +35,7 @@ type graphPopulator struct {
|
||||
|
||||
func AddGraphEventHandlers(
|
||||
graph *Graph,
|
||||
nodes coreinformers.NodeInformer,
|
||||
pods coreinformers.PodInformer,
|
||||
pvs coreinformers.PersistentVolumeInformer,
|
||||
attachments storageinformers.VolumeAttachmentInformer,
|
||||
@ -42,6 +44,14 @@ func AddGraphEventHandlers(
|
||||
graph: graph,
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
|
||||
nodes.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: g.addNode,
|
||||
UpdateFunc: g.updateNode,
|
||||
DeleteFunc: g.deleteNode,
|
||||
})
|
||||
}
|
||||
|
||||
pods.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: g.addPod,
|
||||
UpdateFunc: g.updatePod,
|
||||
@ -63,6 +73,62 @@ func AddGraphEventHandlers(
|
||||
}
|
||||
}
|
||||
|
||||
func (g *graphPopulator) addNode(obj interface{}) {
|
||||
g.updateNode(nil, obj)
|
||||
}
|
||||
|
||||
func (g *graphPopulator) updateNode(oldObj, obj interface{}) {
|
||||
node := obj.(*api.Node)
|
||||
var oldNode *api.Node
|
||||
if oldObj != nil {
|
||||
oldNode = oldObj.(*api.Node)
|
||||
}
|
||||
|
||||
// we only set up rules for ConfigMap today, because that is the only reference type
|
||||
|
||||
var name, namespace string
|
||||
if source := node.Spec.ConfigSource; source != nil && source.ConfigMap != nil {
|
||||
name = source.ConfigMap.Name
|
||||
namespace = source.ConfigMap.Namespace
|
||||
}
|
||||
|
||||
var oldName, oldNamespace string
|
||||
if oldNode != nil {
|
||||
if oldSource := oldNode.Spec.ConfigSource; oldSource != nil && oldSource.ConfigMap != nil {
|
||||
oldName = oldSource.ConfigMap.Name
|
||||
oldNamespace = oldSource.ConfigMap.Namespace
|
||||
}
|
||||
}
|
||||
|
||||
// if Node.Spec.ConfigSource wasn't updated, nothing for us to do
|
||||
if name == oldName && namespace == oldNamespace {
|
||||
return
|
||||
}
|
||||
|
||||
path := "nil"
|
||||
if node.Spec.ConfigSource != nil {
|
||||
path = fmt.Sprintf("%s/%s", namespace, name)
|
||||
}
|
||||
glog.V(4).Infof("updateNode configSource reference to %s for node %s", path, node.Name)
|
||||
g.graph.SetNodeConfigMap(node.Name, name, namespace)
|
||||
}
|
||||
|
||||
func (g *graphPopulator) deleteNode(obj interface{}) {
|
||||
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
|
||||
obj = tombstone.Obj
|
||||
}
|
||||
node, ok := obj.(*api.Node)
|
||||
if !ok {
|
||||
glog.Infof("unexpected type %T", obj)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: We don't remove the node, because if the node is re-created not all pod -> node
|
||||
// links are re-established (we don't get relevant events because no mutations need
|
||||
// to happen in the API; the state is already there).
|
||||
g.graph.SetNodeConfigMap(node.Name, "", "")
|
||||
}
|
||||
|
||||
func (g *graphPopulator) addPod(obj interface{}) {
|
||||
g.updatePod(nil, obj)
|
||||
}
|
||||
|
178
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_test.go
generated
vendored
Normal file
178
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/graph_test.go
generated
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDeleteEdges_locked(t *testing.T) {
|
||||
cases := []struct {
|
||||
desc string
|
||||
fromType vertexType
|
||||
toType vertexType
|
||||
toNamespace string
|
||||
toName string
|
||||
start *Graph
|
||||
expect *Graph
|
||||
}{
|
||||
{
|
||||
// single edge from a configmap to a node, will delete edge and orphaned configmap
|
||||
desc: "edges and source orphans are deleted, destination orphans are preserved",
|
||||
fromType: configMapVertexType,
|
||||
toType: nodeVertexType,
|
||||
toNamespace: "",
|
||||
toName: "node1",
|
||||
start: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap2")
|
||||
nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex, nodeVertex))
|
||||
return g
|
||||
}(),
|
||||
expect: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap2")
|
||||
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
return g
|
||||
}(),
|
||||
},
|
||||
{
|
||||
// two edges from the same configmap to distinct nodes, will delete one of the edges
|
||||
desc: "edges are deleted, non-orphans and destination orphans are preserved",
|
||||
fromType: configMapVertexType,
|
||||
toType: nodeVertexType,
|
||||
toNamespace: "",
|
||||
toName: "node2",
|
||||
start: func() *Graph {
|
||||
g := NewGraph()
|
||||
nodeVertex1 := g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
nodeVertex2 := g.getOrCreateVertex_locked(nodeVertexType, "", "node2")
|
||||
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex1, nodeVertex1))
|
||||
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex2, nodeVertex2))
|
||||
return g
|
||||
}(),
|
||||
expect: func() *Graph {
|
||||
g := NewGraph()
|
||||
nodeVertex1 := g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
g.getOrCreateVertex_locked(nodeVertexType, "", "node2")
|
||||
configmapVertex := g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
g.graph.SetEdge(newDestinationEdge(configmapVertex, nodeVertex1, nodeVertex1))
|
||||
return g
|
||||
}(),
|
||||
},
|
||||
{
|
||||
desc: "no edges to delete",
|
||||
fromType: configMapVertexType,
|
||||
toType: nodeVertexType,
|
||||
toNamespace: "",
|
||||
toName: "node1",
|
||||
start: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
return g
|
||||
}(),
|
||||
expect: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
return g
|
||||
}(),
|
||||
},
|
||||
{
|
||||
desc: "destination vertex does not exist",
|
||||
fromType: configMapVertexType,
|
||||
toType: nodeVertexType,
|
||||
toNamespace: "",
|
||||
toName: "node1",
|
||||
start: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
return g
|
||||
}(),
|
||||
expect: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(configMapVertexType, "namespace1", "configmap1")
|
||||
return g
|
||||
}(),
|
||||
},
|
||||
{
|
||||
desc: "source vertex type doesn't exist",
|
||||
fromType: configMapVertexType,
|
||||
toType: nodeVertexType,
|
||||
toNamespace: "",
|
||||
toName: "node1",
|
||||
start: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
return g
|
||||
}(),
|
||||
expect: func() *Graph {
|
||||
g := NewGraph()
|
||||
g.getOrCreateVertex_locked(nodeVertexType, "", "node1")
|
||||
return g
|
||||
}(),
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.desc, func(t *testing.T) {
|
||||
c.start.deleteEdges_locked(c.fromType, c.toType, c.toNamespace, c.toName)
|
||||
|
||||
// Note: We assert on substructures (graph.Nodes(), graph.Edges()) because the graph tracks
|
||||
// freed IDs for reuse, which results in an irrelevant inequality between start and expect.
|
||||
|
||||
// sort the nodes by ID
|
||||
// (the slices we get back are from map iteration, where order is not guaranteed)
|
||||
expectNodes := c.expect.graph.Nodes()
|
||||
sort.Slice(expectNodes, func(i, j int) bool {
|
||||
return expectNodes[i].ID() < expectNodes[j].ID()
|
||||
})
|
||||
startNodes := c.start.graph.Nodes()
|
||||
sort.Slice(startNodes, func(i, j int) bool {
|
||||
return startNodes[i].ID() < startNodes[j].ID()
|
||||
})
|
||||
assert.Equal(t, expectNodes, startNodes)
|
||||
|
||||
// sort the edges by from ID, then to ID
|
||||
// (the slices we get back are from map iteration, where order is not guaranteed)
|
||||
expectEdges := c.expect.graph.Edges()
|
||||
sort.Slice(expectEdges, func(i, j int) bool {
|
||||
if expectEdges[i].From().ID() == expectEdges[j].From().ID() {
|
||||
return expectEdges[i].To().ID() < expectEdges[j].To().ID()
|
||||
}
|
||||
return expectEdges[i].From().ID() < expectEdges[j].From().ID()
|
||||
})
|
||||
startEdges := c.start.graph.Edges()
|
||||
sort.Slice(startEdges, func(i, j int) bool {
|
||||
if startEdges[i].From().ID() == startEdges[j].From().ID() {
|
||||
return startEdges[i].To().ID() < startEdges[j].To().ID()
|
||||
}
|
||||
return startEdges[i].From().ID() < startEdges[j].From().ID()
|
||||
})
|
||||
assert.Equal(t, expectEdges, startEdges)
|
||||
|
||||
// vertices is a recursive map, no need to sort
|
||||
assert.Equal(t, c.expect.vertices, c.start.vertices)
|
||||
})
|
||||
}
|
||||
}
|
62
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset.go
generated
vendored
Normal file
62
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
// intSet maintains a set of ints, and supports promoting and culling the previous generation.
|
||||
// this allows tracking a large, mostly-stable set without constantly reallocating the entire set.
|
||||
type intSet struct {
|
||||
currentGeneration byte
|
||||
members map[int]byte
|
||||
}
|
||||
|
||||
func newIntSet() *intSet {
|
||||
return &intSet{members: map[int]byte{}}
|
||||
}
|
||||
|
||||
// has returns true if the specified int is in the set.
|
||||
// it is safe to call concurrently, but must not be called concurrently with any of the other methods.
|
||||
func (s *intSet) has(i int) bool {
|
||||
if s == nil {
|
||||
return false
|
||||
}
|
||||
_, present := s.members[i]
|
||||
return present
|
||||
}
|
||||
|
||||
// startNewGeneration begins a new generation.
|
||||
// it must be followed by a call to mark() for every member of the generation,
|
||||
// then a call to sweep() to remove members not present in the generation.
|
||||
// it is not thread-safe.
|
||||
func (s *intSet) startNewGeneration() {
|
||||
s.currentGeneration++
|
||||
}
|
||||
|
||||
// mark indicates the specified int belongs to the current generation.
|
||||
// it is not thread-safe.
|
||||
func (s *intSet) mark(i int) {
|
||||
s.members[i] = s.currentGeneration
|
||||
}
|
||||
|
||||
// sweep removes items not in the current generation.
|
||||
// it is not thread-safe.
|
||||
func (s *intSet) sweep() {
|
||||
for k, v := range s.members {
|
||||
if v != s.currentGeneration {
|
||||
delete(s.members, k)
|
||||
}
|
||||
}
|
||||
}
|
62
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset_test.go
generated
vendored
Normal file
62
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/intset_test.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestIntSet(t *testing.T) {
|
||||
i := newIntSet()
|
||||
|
||||
assert.False(t, i.has(1))
|
||||
assert.False(t, i.has(2))
|
||||
assert.False(t, i.has(3))
|
||||
assert.False(t, i.has(4))
|
||||
|
||||
i.startNewGeneration()
|
||||
i.mark(1)
|
||||
i.mark(2)
|
||||
i.sweep()
|
||||
|
||||
assert.True(t, i.has(1))
|
||||
assert.True(t, i.has(2))
|
||||
assert.False(t, i.has(3))
|
||||
assert.False(t, i.has(4))
|
||||
|
||||
i.startNewGeneration()
|
||||
i.mark(2)
|
||||
i.mark(3)
|
||||
i.sweep()
|
||||
|
||||
assert.False(t, i.has(1))
|
||||
assert.True(t, i.has(2))
|
||||
assert.True(t, i.has(3))
|
||||
assert.False(t, i.has(4))
|
||||
|
||||
i.startNewGeneration()
|
||||
i.mark(3)
|
||||
i.mark(4)
|
||||
i.sweep()
|
||||
|
||||
assert.False(t, i.has(1))
|
||||
assert.False(t, i.has(2))
|
||||
assert.True(t, i.has(3))
|
||||
assert.True(t, i.has(4))
|
||||
}
|
34
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go
generated
vendored
34
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer.go
generated
vendored
@ -21,11 +21,11 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
|
||||
storageapi "k8s.io/kubernetes/pkg/apis/storage"
|
||||
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
@ -38,6 +38,7 @@ import (
|
||||
// 1. If a request is not from a node (NodeIdentity() returns isNode=false), reject
|
||||
// 2. If a specific node cannot be identified (NodeIdentity() returns nodeName=""), reject
|
||||
// 3. If a request is for a secret, configmap, persistent volume or persistent volume claim, reject unless the verb is get, and the requested object is related to the requesting node:
|
||||
// node <- configmap
|
||||
// node <- pod
|
||||
// node <- pod <- secret
|
||||
// node <- pod <- configmap
|
||||
@ -48,14 +49,14 @@ import (
|
||||
type NodeAuthorizer struct {
|
||||
graph *Graph
|
||||
identifier nodeidentifier.NodeIdentifier
|
||||
nodeRules []rbacapi.PolicyRule
|
||||
nodeRules []rbacv1.PolicyRule
|
||||
|
||||
// allows overriding for testing
|
||||
features utilfeature.FeatureGate
|
||||
}
|
||||
|
||||
// NewAuthorizer returns a new node authorizer
|
||||
func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacapi.PolicyRule) authorizer.Authorizer {
|
||||
func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules []rbacv1.PolicyRule) authorizer.Authorizer {
|
||||
return &NodeAuthorizer{
|
||||
graph: graph,
|
||||
identifier: identifier,
|
||||
@ -90,9 +91,9 @@ func (r *NodeAuthorizer) Authorize(attrs authorizer.Attributes) (authorizer.Deci
requestResource := schema.GroupResource{Group: attrs.GetAPIGroup(), Resource: attrs.GetResource()}
switch requestResource {
case secretResource:
return r.authorizeGet(nodeName, secretVertexType, attrs)
return r.authorizeReadNamespacedObject(nodeName, secretVertexType, attrs)
case configMapResource:
return r.authorizeGet(nodeName, configMapVertexType, attrs)
return r.authorizeReadNamespacedObject(nodeName, configMapVertexType, attrs)
case pvcResource:
if r.features.Enabled(features.ExpandPersistentVolumes) {
if attrs.GetSubresource() == "status" {
@ -153,6 +154,24 @@ func (r *NodeAuthorizer) authorizeGet(nodeName string, startingType vertexType,
return r.authorize(nodeName, startingType, attrs)
}

// authorizeReadNamespacedObject authorizes "get", "list" and "watch" requests to single objects of a
// specified type if they are related to the specified node.
func (r *NodeAuthorizer) authorizeReadNamespacedObject(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
if attrs.GetVerb() != "get" && attrs.GetVerb() != "list" && attrs.GetVerb() != "watch" {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "can only read resources of this type", nil
}
if len(attrs.GetSubresource()) > 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "cannot read subresource", nil
}
if len(attrs.GetNamespace()) == 0 {
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "can only read namespaced object of this type", nil
}
return r.authorize(nodeName, startingType, attrs)
}
func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
|
||||
if len(attrs.GetName()) == 0 {
|
||||
glog.V(2).Infof("NODE DENY: %s %#v", nodeName, attrs)
|
||||
@ -211,6 +230,11 @@ func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, s
|
||||
return false, fmt.Errorf("node %q cannot get unknown %s %s/%s", nodeName, vertexTypes[startingType], startingNamespace, startingName)
|
||||
}
|
||||
|
||||
// Fast check to see if we know of a destination edge
|
||||
if r.graph.destinationEdgeIndex[startingVertex.ID()].has(nodeVertex.ID()) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
found := false
|
||||
traversal := &traverse.VisitingDepthFirst{
|
||||
EdgeFilter: func(edge graph.Edge) bool {
|
||||
|
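Condensed, the read gate added above comes down to three checks on the request attributes before any graph work happens. The sketch below restates them outside the authorizer for clarity; the helper name is hypothetical, and whether the object is actually related to the requesting node is still decided by the graph traversal in authorize:

// canReadSingleNamespacedObject mirrors the pre-checks in authorizeReadNamespacedObject:
// only get/list/watch verbs, no subresources, and the request must be namespaced.
// (authorize additionally requires a non-empty object name before traversing the graph.)
func canReadSingleNamespacedObject(verb, subresource, namespace string) bool {
	isReadVerb := verb == "get" || verb == "list" || verb == "watch"
	return isReadVerb && subresource == "" && namespace != ""
}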
293
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer_test.go
generated
vendored
293
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/node/node_authorizer_test.go
generated
vendored
@ -20,12 +20,15 @@ import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
||||
storagev1beta1 "k8s.io/api/storage/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
@ -72,8 +75,8 @@ func TestAuthorizer(t *testing.T) {
|
||||
sharedPVCsPerPod: 0,
|
||||
uniquePVCsPerPod: 1,
|
||||
}
|
||||
pods, pvs, attachments := generate(opts)
|
||||
populate(g, pods, pvs, attachments)
|
||||
nodes, pods, pvs, attachments := generate(opts)
|
||||
populate(g, nodes, pods, pvs, attachments)
|
||||
|
||||
identifier := nodeidentifier.NewDefaultNodeIdentifier()
|
||||
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules()).(*NodeAuthorizer)
|
||||
@ -86,6 +89,11 @@ func TestAuthorizer(t *testing.T) {
|
||||
expect authorizer.Decision
|
||||
features utilfeature.FeatureGate
|
||||
}{
|
||||
{
|
||||
name: "allowed node configmap",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node0-configmap", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionAllow,
|
||||
},
|
||||
{
|
||||
name: "allowed configmap",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node0", Namespace: "ns0"},
|
||||
@ -96,6 +104,31 @@ func TestAuthorizer(t *testing.T) {
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionAllow,
|
||||
},
|
||||
{
|
||||
name: "list allowed secret via pod",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionAllow,
|
||||
},
|
||||
{
|
||||
name: "watch allowed secret via pod",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionAllow,
|
||||
},
|
||||
{
|
||||
name: "disallowed list many secrets",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "secrets", Name: "", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionNoOpinion,
|
||||
},
|
||||
{
|
||||
name: "disallowed watch many secrets",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "secrets", Name: "", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionNoOpinion,
|
||||
},
|
||||
{
|
||||
name: "disallowed list secrets from all namespaces with name",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: ""},
|
||||
expect: authorizer.DecisionNoOpinion,
|
||||
},
|
||||
{
|
||||
name: "allowed shared secret via pod",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-shared", Namespace: "ns0"},
|
||||
@ -117,6 +150,11 @@ func TestAuthorizer(t *testing.T) {
|
||||
expect: authorizer.DecisionAllow,
|
||||
},
|
||||
|
||||
{
|
||||
name: "disallowed node configmap",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node1-configmap", Namespace: "ns0"},
|
||||
expect: authorizer.DecisionNoOpinion,
|
||||
},
|
||||
{
|
||||
name: "disallowed configmap",
|
||||
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node1", Namespace: "ns0"},
|
||||
@ -209,6 +247,7 @@ func TestAuthorizer(t *testing.T) {
|
||||
|
||||
func TestAuthorizerSharedResources(t *testing.T) {
|
||||
g := NewGraph()
|
||||
g.destinationEdgeThreshold = 1
|
||||
identifier := nodeidentifier.NewDefaultNodeIdentifier()
|
||||
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
|
||||
|
||||
@ -237,7 +276,8 @@ func TestAuthorizerSharedResources(t *testing.T) {
|
||||
},
|
||||
},
|
||||
})
|
||||
g.AddPod(&api.Pod{
|
||||
|
||||
pod3 := &api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pod3-node3", Namespace: "ns1"},
|
||||
Spec: api.PodSpec{
|
||||
NodeName: "node3",
|
||||
@ -245,11 +285,17 @@ func TestAuthorizerSharedResources(t *testing.T) {
|
||||
{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "shared-all"}}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
g.AddPod(pod3)
|
||||
|
||||
g.SetNodeConfigMap("node1", "shared-configmap", "ns1")
|
||||
g.SetNodeConfigMap("node2", "shared-configmap", "ns1")
|
||||
g.SetNodeConfigMap("node3", "configmap", "ns1")
|
||||
|
||||
testcases := []struct {
|
||||
User user.Info
|
||||
Secret string
|
||||
ConfigMap string
|
||||
ExpectAllowed bool
|
||||
}{
|
||||
{User: node1, ExpectAllowed: true, Secret: "node1-only"},
|
||||
@ -263,18 +309,67 @@ func TestAuthorizerSharedResources(t *testing.T) {
|
||||
{User: node3, ExpectAllowed: false, Secret: "node1-only"},
|
||||
{User: node3, ExpectAllowed: false, Secret: "node1-node2-only"},
|
||||
{User: node3, ExpectAllowed: true, Secret: "shared-all"},
|
||||
|
||||
{User: node1, ExpectAllowed: true, ConfigMap: "shared-configmap"},
|
||||
{User: node1, ExpectAllowed: false, ConfigMap: "configmap"},
|
||||
|
||||
{User: node2, ExpectAllowed: true, ConfigMap: "shared-configmap"},
|
||||
{User: node2, ExpectAllowed: false, ConfigMap: "configmap"},
|
||||
|
||||
{User: node3, ExpectAllowed: false, ConfigMap: "shared-configmap"},
|
||||
{User: node3, ExpectAllowed: true, ConfigMap: "configmap"},
|
||||
}
|
||||
|
||||
for i, tc := range testcases {
|
||||
decision, _, err := authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: tc.Secret})
|
||||
if err != nil {
|
||||
t.Errorf("%d: unexpected error: %v", i, err)
|
||||
continue
|
||||
var (
|
||||
decision authorizer.Decision
|
||||
err error
|
||||
)
|
||||
|
||||
if len(tc.Secret) > 0 {
|
||||
decision, _, err = authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: tc.Secret})
|
||||
if err != nil {
|
||||
t.Errorf("%d: unexpected error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
} else if len(tc.ConfigMap) > 0 {
|
||||
decision, _, err = authz.Authorize(authorizer.AttributesRecord{User: tc.User, ResourceRequest: true, Verb: "get", Resource: "configmaps", Namespace: "ns1", Name: tc.ConfigMap})
|
||||
if err != nil {
|
||||
t.Errorf("%d: unexpected error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("test case must include a request for a Secret or ConfigMap")
|
||||
}
|
||||
|
||||
if (decision == authorizer.DecisionAllow) != tc.ExpectAllowed {
|
||||
t.Errorf("%d: expected %v, got %v", i, tc.ExpectAllowed, decision)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
node3SharedSecretGet := authorizer.AttributesRecord{User: node3, ResourceRequest: true, Verb: "get", Resource: "secrets", Namespace: "ns1", Name: "shared-all"}
|
||||
|
||||
decision, _, err := authz.Authorize(node3SharedSecretGet)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if decision != authorizer.DecisionAllow {
|
||||
t.Error("expected allowed")
|
||||
}
|
||||
|
||||
// should trigger recalculation of the shared secret index
|
||||
pod3.Spec.Volumes = nil
|
||||
g.AddPod(pod3)
|
||||
|
||||
decision, _, err = authz.Authorize(node3SharedSecretGet)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if decision == authorizer.DecisionAllow {
|
||||
t.Errorf("unexpectedly allowed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type sampleDataOpts struct {
|
||||
@ -309,12 +404,12 @@ func BenchmarkPopulationAllocation(b *testing.B) {
|
||||
uniquePVCsPerPod: 1,
|
||||
}
|
||||
|
||||
pods, pvs, attachments := generate(opts)
|
||||
nodes, pods, pvs, attachments := generate(opts)
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
g := NewGraph()
|
||||
populate(g, pods, pvs, attachments)
|
||||
populate(g, nodes, pods, pvs, attachments)
|
||||
}
|
||||
}
|
||||
|
||||
@ -340,14 +435,14 @@ func BenchmarkPopulationRetention(b *testing.B) {
|
||||
uniquePVCsPerPod: 1,
|
||||
}
|
||||
|
||||
pods, pvs, attachments := generate(opts)
|
||||
nodes, pods, pvs, attachments := generate(opts)
|
||||
// Garbage collect before the first iteration
|
||||
runtime.GC()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
g := NewGraph()
|
||||
populate(g, pods, pvs, attachments)
|
||||
populate(g, nodes, pods, pvs, attachments)
|
||||
|
||||
if i == 0 {
|
||||
f, _ := os.Create("BenchmarkPopulationRetention.profile")
|
||||
@ -360,10 +455,47 @@ func BenchmarkPopulationRetention(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteIndexMaintenance(b *testing.B) {
|
||||
|
||||
// Run with:
|
||||
// go test ./plugin/pkg/auth/authorizer/node -benchmem -bench BenchmarkWriteIndexMaintenance -run None
|
||||
|
||||
opts := sampleDataOpts{
|
||||
// simulate high replication in a small number of namespaces:
|
||||
nodes: 5000,
|
||||
namespaces: 1,
|
||||
podsPerNode: 1,
|
||||
attachmentsPerNode: 20,
|
||||
sharedConfigMapsPerPod: 0,
|
||||
uniqueConfigMapsPerPod: 1,
|
||||
sharedSecretsPerPod: 1,
|
||||
uniqueSecretsPerPod: 1,
|
||||
sharedPVCsPerPod: 0,
|
||||
uniquePVCsPerPod: 1,
|
||||
}
|
||||
nodes, pods, pvs, attachments := generate(opts)
|
||||
g := NewGraph()
|
||||
populate(g, nodes, pods, pvs, attachments)
|
||||
// Garbage collect before the first iteration
|
||||
runtime.GC()
|
||||
b.ResetTimer()
|
||||
|
||||
b.SetParallelism(100)
|
||||
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
g.AddPod(pods[0])
}
})
}

func BenchmarkAuthorization(b *testing.B) {
g := NewGraph()

opts := sampleDataOpts{
// To simulate high replication in a small number of namespaces:
// nodes: 5000,
// namespaces: 10,
// podsPerNode: 10,
nodes: 500,
namespaces: 200,
podsPerNode: 200,
@ -375,8 +507,8 @@ func BenchmarkAuthorization(b *testing.B) {
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
pods, pvs, attachments := generate(opts)
populate(g, pods, pvs, attachments)
nodes, pods, pvs, attachments := generate(opts)
populate(g, nodes, pods, pvs, attachments)

identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules()).(*NodeAuthorizer)
@ -389,6 +521,11 @@ func BenchmarkAuthorization(b *testing.B) {
expect authorizer.Decision
features utilfeature.FeatureGate
}{
{
name: "allowed node configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node0-configmap", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node0", Namespace: "ns0"},
@ -404,6 +541,12 @@ func BenchmarkAuthorization(b *testing.B) {
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-shared", Namespace: "ns0"},
expect: authorizer.DecisionAllow,
},

{
name: "disallowed node configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "node1-configmap", Namespace: "ns0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed configmap",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "configmaps", Name: "configmap0-pod0-node1", Namespace: "ns0"},
@ -450,26 +593,102 @@ func BenchmarkAuthorization(b *testing.B) {
}

b.ResetTimer()
for _, tc := range tests {
if tc.features == nil {
authz.features = utilfeature.DefaultFeatureGate
} else {
authz.features = tc.features
}
b.Run(tc.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
decision, _, _ := authz.Authorize(tc.attrs)
if decision != tc.expect {
b.Errorf("expected %v, got %v", tc.expect, decision)
for _, testWriteContention := range []bool{false, true} {

shouldWrite := int32(1)
writes := int64(0)
_1ms := int64(0)
_10ms := int64(0)
_25ms := int64(0)
_50ms := int64(0)
_100ms := int64(0)
_250ms := int64(0)
_500ms := int64(0)
_1000ms := int64(0)
_1s := int64(0)

contentionPrefix := ""
if testWriteContention {
contentionPrefix = "contentious "
// Start a writer pushing graph modifications 100x a second
go func() {
for shouldWrite == 1 {
go func() {
start := time.Now()
authz.graph.AddPod(&api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "testwrite", Namespace: "ns0"},
Spec: api.PodSpec{
NodeName: "node0",
ServiceAccountName: "default",
Volumes: []api.Volume{
{Name: "token", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "secret0-shared"}}},
},
},
})
diff := time.Now().Sub(start)
atomic.AddInt64(&writes, 1)
switch {
case diff < time.Millisecond:
atomic.AddInt64(&_1ms, 1)
case diff < 10*time.Millisecond:
atomic.AddInt64(&_10ms, 1)
case diff < 25*time.Millisecond:
atomic.AddInt64(&_25ms, 1)
case diff < 50*time.Millisecond:
atomic.AddInt64(&_50ms, 1)
case diff < 100*time.Millisecond:
atomic.AddInt64(&_100ms, 1)
case diff < 250*time.Millisecond:
atomic.AddInt64(&_250ms, 1)
case diff < 500*time.Millisecond:
atomic.AddInt64(&_500ms, 1)
case diff < 1000*time.Millisecond:
atomic.AddInt64(&_1000ms, 1)
default:
atomic.AddInt64(&_1s, 1)
}
}()
time.Sleep(10 * time.Millisecond)
}
}()
}

for _, tc := range tests {
if tc.features == nil {
authz.features = utilfeature.DefaultFeatureGate
} else {
authz.features = tc.features
}
})
b.Run(contentionPrefix+tc.name, func(b *testing.B) {
// Run authorization checks in parallel
b.SetParallelism(5000)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
decision, _, _ := authz.Authorize(tc.attrs)
if decision != tc.expect {
b.Errorf("expected %v, got %v", tc.expect, decision)
}
}
})
})
}

atomic.StoreInt32(&shouldWrite, 0)
if testWriteContention {
b.Logf("graph modifications during contention test: %d", writes)
b.Logf("<1ms=%d, <10ms=%d, <25ms=%d, <50ms=%d, <100ms=%d, <250ms=%d, <500ms=%d, <1000ms=%d, >1000ms=%d", _1ms, _10ms, _25ms, _50ms, _100ms, _250ms, _500ms, _1000ms, _1s)
} else {
b.Logf("graph modifications during non-contention test: %d", writes)
}
}
}

func populate(graph *Graph, pods []*api.Pod, pvs []*api.PersistentVolume, attachments []*storagev1beta1.VolumeAttachment) {
func populate(graph *Graph, nodes []*api.Node, pods []*api.Pod, pvs []*api.PersistentVolume, attachments []*storagev1beta1.VolumeAttachment) {
p := &graphPopulator{}
p.graph = graph
for _, node := range nodes {
p.addNode(node)
}
for _, pod := range pods {
p.addPod(pod)
}
@ -485,7 +704,8 @@ func populate(graph *Graph, pods []*api.Pod, pvs []*api.PersistentVolume, attach
// the secret/configmap/pvc/node references in the pod and pv objects are named to indicate the connections between the objects.
// for example, secret0-pod0-node0 is a secret referenced by pod0 which is bound to node0.
// when populated into the graph, the node authorizer should allow node0 to access that secret, but not node1.
func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume, []*storagev1beta1.VolumeAttachment) {
func generate(opts sampleDataOpts) ([]*api.Node, []*api.Pod, []*api.PersistentVolume, []*storagev1beta1.VolumeAttachment) {
nodes := make([]*api.Node, 0, opts.nodes)
pods := make([]*api.Pod, 0, opts.nodes*opts.podsPerNode)
pvs := make([]*api.PersistentVolume, 0, (opts.nodes*opts.podsPerNode*opts.uniquePVCsPerPod)+(opts.sharedPVCsPerPod*opts.namespaces))
attachments := make([]*storagev1beta1.VolumeAttachment, 0, opts.nodes*opts.attachmentsPerNode)
@ -552,6 +772,21 @@ func generate(opts sampleDataOpts) ([]*api.Pod, []*api.PersistentVolume, []*stor
attachment.Spec.NodeName = nodeName
attachments = append(attachments, attachment)
}

name := fmt.Sprintf("%s-configmap", nodeName)
nodes = append(nodes, &api.Node{
ObjectMeta: metav1.ObjectMeta{Name: nodeName},
Spec: api.NodeSpec{
ConfigSource: &api.NodeConfigSource{
ConfigMap: &api.ConfigMapNodeConfigSource{
Name: name,
Namespace: "ns0",
UID: types.UID(fmt.Sprintf("ns0-%s", name)),
KubeletConfigKey: "kubelet",
},
},
},
})
}
return pods, pvs, attachments
return nodes, pods, pvs, attachments
}
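Note: the comment above generate() describes the fixture naming convention (secret0-pod0-node0 is a secret referenced by pod0, which is bound to node0). The short sketch below, which is not part of the vendored diff, illustrates how that convention maps onto authorizer decisions. It assumes the generate/populate helpers and NewAuthorizer wiring used in the benchmark, and node0/node1 user.Info values like those defined elsewhere in this test file.

// Sketch only (not in the vendored diff): the naming convention in generate()
// implies these decisions. Assumes node0 and node1 are user.Info values for
// system:node:node0 and system:node:node1, as used elsewhere in this file.
func checkNamingConventionSketch(t *testing.T, authz *NodeAuthorizer, node0, node1 user.Info) {
attrs := func(u user.Info) authorizer.AttributesRecord {
return authorizer.AttributesRecord{User: u, ResourceRequest: true, Verb: "get", Resource: "secrets", Name: "secret0-pod0-node0", Namespace: "ns0"}
}
// secret0-pod0-node0 is referenced by pod0, which is bound to node0, so node0 is allowed.
if d, _, _ := authz.Authorize(attrs(node0)); d != authorizer.DecisionAllow {
t.Errorf("expected DecisionAllow for node0, got %v", d)
}
// node1 runs no pod that references the secret, so the node authorizer abstains.
if d, _, _ := authz.Authorize(attrs(node1)); d != authorizer.DecisionNoOpinion {
t.Errorf("expected DecisionNoOpinion for node1, got %v", d)
}
}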
8
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD
generated
vendored
8
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD
generated
vendored
@ -14,14 +14,15 @@ go_library(
],
importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac",
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/client/listers/rbac/internalversion:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//vendor/k8s.io/client-go/listers/rbac/v1:go_default_library",
],
)

@ -33,9 +34,10 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//plugin/pkg/auth/authorizer/rbac/bootstrappolicy:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
5
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD
generated
vendored
5
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD
generated
vendored
@ -15,9 +15,10 @@ go_library(
],
importpath = "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy",
deps = [
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/features:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/rbac/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -37,8 +38,8 @@ go_test(
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/install:go_default_library",
"//pkg/apis/rbac/v1:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
@ -21,15 +21,16 @@ import (

"github.com/golang/glog"

rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/features"
)

const saRolePrefix = "system:controller:"

func addControllerRole(controllerRoles *[]rbac.ClusterRole, controllerRoleBindings *[]rbac.ClusterRoleBinding, role rbac.ClusterRole) {
func addControllerRole(controllerRoles *[]rbacv1.ClusterRole, controllerRoleBindings *[]rbacv1.ClusterRoleBinding, role rbacv1.ClusterRole) {
if !strings.HasPrefix(role.Name, saRolePrefix) {
glog.Fatalf(`role %q must start with %q`, role.Name, saRolePrefix)
}
@ -44,317 +45,313 @@ func addControllerRole(controllerRoles *[]rbac.ClusterRole, controllerRoleBindin
|
||||
addClusterRoleLabel(*controllerRoles)
|
||||
|
||||
*controllerRoleBindings = append(*controllerRoleBindings,
|
||||
rbac.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie())
|
||||
rbacv1helpers.NewClusterBinding(role.Name).SAs("kube-system", role.Name[len(saRolePrefix):]).BindingOrDie())
|
||||
addClusterRoleBindingLabel(*controllerRoleBindings)
|
||||
}
|
||||
|
||||
func eventsRule() rbac.PolicyRule {
|
||||
return rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie()
|
||||
func eventsRule() rbacv1.PolicyRule {
|
||||
return rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie()
|
||||
}
|
||||
|
||||
func buildControllerRoles() ([]rbac.ClusterRole, []rbac.ClusterRoleBinding) {
|
||||
func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) {
|
||||
// controllerRoles is a slice of roles used for controllers
|
||||
controllerRoles := []rbac.ClusterRole{}
|
||||
controllerRoles := []rbacv1.ClusterRole{}
|
||||
// controllerRoleBindings is a slice of roles used for controllers
|
||||
controllerRoleBindings := []rbac.ClusterRoleBinding{}
|
||||
controllerRoleBindings := []rbacv1.ClusterRoleBinding{}
|
||||
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbac.ClusterRole {
|
||||
role := rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
|
||||
role := rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
|
||||
role.Rules = append(role.Rules, rbac.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
|
||||
role.Rules = append(role.Rules, rbacv1helpers.NewRule("get", "create", "delete", "list", "watch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie())
|
||||
}
|
||||
|
||||
return role
|
||||
}())
|
||||
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "clusterrole-aggregation-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// this controller must have full permissions to allow it to mutate any role in any way
|
||||
rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbac.NewRule("*").URLs("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cronjob-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "delete", "patch").Groups(batchGroup).Resources("jobs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("cronjobs/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("daemonsets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("daemonsets/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "create", "delete", "update", "patch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "deployment-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(extensionsGroup, appsGroup).Resources("deployments/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch", "delete").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
// TODO: remove "update" once
|
||||
// https://github.com/kubernetes/kubernetes/issues/36897 is resolved.
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "disruption-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(extensionsGroup, appsGroup).Resources("deployments").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(policyGroup).Resources("poddisruptionbudgets/status").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpoint-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints/restricted").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "expand-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
// glusterfs
|
||||
rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// the GC controller needs to run list/watches, selective gets, and updates against any resource
|
||||
rbac.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(),
|
||||
rbac.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(),
|
||||
rbac.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(autoscalingGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "update").Groups("*").Resources("*/scale").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
// TODO: restrict this to the appropriate namespace
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("services/proxy").Names("https:heapster:", "http:heapster:").RuleOrDie(),
|
||||
// allow listing resource metrics and custom metrics
|
||||
rbac.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list").Groups(resMetricsGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list").Groups(customMetricsGroup).Resources("*").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "job-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(batchGroup).Resources("jobs/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(batchGroup).Resources("jobs/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch", "create", "delete", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "namespace-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "delete", "deletecollection").Groups("*").Resources("*").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "update", "delete", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
// used for pod eviction
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||
rbac.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch", "get", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
|
||||
// glusterfs
|
||||
rbac.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
|
||||
rbac.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "create", "delete").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
// openstack
|
||||
rbac.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
|
||||
// recyclerClient.WatchPod
|
||||
rbac.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
|
||||
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replicaset-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(appsGroup, extensionsGroup).Resources("replicasets/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replication-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// 1.0 controllers needed get, update, so without these old controllers break on new servers
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("replicationcontrollers/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch", "patch", "create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// quota can count quota on anything for reconciliation, so it needs full viewing powers
|
||||
rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("resourcequotas/status").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "route-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-account-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(),
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(appsGroup).Resources("statefulsets/finalizers").RuleOrDie(),
|
||||
rbac.NewRule("get", "create", "delete", "update", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("get", "create", "delete", "update", "patch", "list", "watch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
|
||||
rbac.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(appsGroup).Resources("statefulsets/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "create", "delete", "update", "patch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "create", "delete", "update", "patch", "list", "watch").Groups(appsGroup).Resources("controllerrevisions").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ttl-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("update", "patch", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("update", "patch", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "certificate-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(),
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbacv1helpers.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pv-protection-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
eventsRule(),
},
})
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
rbac.NewRule("list", "watch", "get").Groups(legacyGroup).Resources("pods").RuleOrDie(),
eventsRule(),
},
})
}
if utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection) {
addControllerRole(&controllerRoles, &controllerRoleBindings, rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pv-protection-controller"},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
eventsRule(),
},
})
}

return controllerRoles, controllerRoleBindings
}

// ControllerRoles returns the cluster roles used by controllers
func ControllerRoles() []rbac.ClusterRole {
func ControllerRoles() []rbacv1.ClusterRole {
controllerRoles, _ := buildControllerRoles()
return controllerRoles
}

// ControllerRoleBindings returns the role bindings used by controllers
func ControllerRoleBindings() []rbac.ClusterRoleBinding {
func ControllerRoleBindings() []rbacv1.ClusterRoleBinding {
_, controllerRoleBindings := buildControllerRoles()
return controllerRoleBindings
}
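Note: throughout this file the migration swaps the internal rbac helpers for rbacv1helpers, but the rule-builder pattern itself is unchanged. The sketch below is not part of the vendored diff; it shows how a controller role is typically assembled with that pattern, and the "example-controller" name is hypothetical.

// Sketch only (not in the vendored diff): assembling a controller role with the
// rbacv1helpers builder and registering it through addControllerRole.
func exampleControllerRole(controllerRoles *[]rbacv1.ClusterRole, controllerRoleBindings *[]rbacv1.ClusterRoleBinding) {
addControllerRole(controllerRoles, controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "example-controller"},
Rules: []rbacv1.PolicyRule{
// read-only access to pods in the core ("") API group
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
// every controller role also gets the shared events rule
eventsRule(),
},
})
}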
@ -21,19 +21,20 @@ import (

"github.com/golang/glog"

rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)

var (
// namespaceRoles is a map of namespace to slice of roles to create
namespaceRoles = map[string][]rbac.Role{}
namespaceRoles = map[string][]rbacv1.Role{}

// namespaceRoleBindings is a map of namespace to slice of roleBindings to create
namespaceRoleBindings = map[string][]rbac.RoleBinding{}
namespaceRoleBindings = map[string][]rbacv1.RoleBinding{}
)
func addNamespaceRole(namespace string, role rbac.Role) {
|
||||
func addNamespaceRole(namespace string, role rbacv1.Role) {
|
||||
if !strings.HasPrefix(namespace, "kube-") {
|
||||
glog.Fatalf(`roles can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace)
|
||||
}
|
||||
@ -51,7 +52,7 @@ func addNamespaceRole(namespace string, role rbac.Role) {
|
||||
namespaceRoles[namespace] = existingRoles
|
||||
}
|
||||
|
||||
func addNamespaceRoleBinding(namespace string, roleBinding rbac.RoleBinding) {
|
||||
func addNamespaceRoleBinding(namespace string, roleBinding rbacv1.RoleBinding) {
|
||||
if !strings.HasPrefix(namespace, "kube-") {
|
||||
glog.Fatalf(`rolebindings can only be bootstrapped into reserved namespaces starting with "kube-", not %q`, namespace)
|
||||
}
|
||||
@ -70,86 +71,86 @@ func addNamespaceRoleBinding(namespace string, roleBinding rbac.RoleBinding) {
|
||||
}
|
||||
|
||||
func init() {
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
|
||||
// role for finding authentication config info for starting a server
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "extension-apiserver-authentication-reader"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// this particular config map is exposed and contains authentication configuration information
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("configmaps").Names("extension-apiserver-authentication").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
|
||||
// role for the bootstrap signer to be able to inspect kube-system secrets
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
|
||||
// role for the cloud providers to access/create kube-system configmaps
|
||||
// Deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cloud-provider"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
|
||||
// role for the token-cleaner to be able to remove secrets, but only in kube-system
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "token-cleaner"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
// TODO: Create util on Role+Binding for leader locking if more cases evolve.
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
|
||||
// role for the leader locking on supplied configmap
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-controller-manager"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
rbac.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-controller-manager").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-controller-manager").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespaceSystem, rbacv1.Role{
|
||||
// role for the leader locking on supplied configmap
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system::leader-locking-kube-scheduler"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
rbac.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-scheduler").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("configmaps").Names("kube-scheduler").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addNamespaceRoleBinding(metav1.NamespaceSystem,
|
||||
rbac.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie())
|
||||
rbacv1helpers.NewRoleBinding("system::leader-locking-kube-controller-manager", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-controller-manager").BindingOrDie())
|
||||
addNamespaceRoleBinding(metav1.NamespaceSystem,
|
||||
rbac.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie())
|
||||
rbacv1helpers.NewRoleBinding("system::leader-locking-kube-scheduler", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "kube-scheduler").BindingOrDie())
|
||||
addNamespaceRoleBinding(metav1.NamespaceSystem,
|
||||
rbac.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
|
||||
rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
|
||||
// cloud-provider is deprecated starting Kubernetes 1.10 and will be deleted according to GA deprecation policy.
|
||||
addNamespaceRoleBinding(metav1.NamespaceSystem,
|
||||
rbac.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie())
|
||||
rbacv1helpers.NewRoleBinding(saRolePrefix+"cloud-provider", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "cloud-provider").BindingOrDie())
|
||||
addNamespaceRoleBinding(metav1.NamespaceSystem,
|
||||
rbac.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie())
|
||||
rbacv1helpers.NewRoleBinding(saRolePrefix+"token-cleaner", metav1.NamespaceSystem).SAs(metav1.NamespaceSystem, "token-cleaner").BindingOrDie())
|
||||
|
||||
addNamespaceRole(metav1.NamespacePublic, rbac.Role{
|
||||
addNamespaceRole(metav1.NamespacePublic, rbacv1.Role{
|
||||
// role for the bootstrap signer to be able to write its configmap
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "bootstrap-signer"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("configmaps").Names("cluster-info").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("configmaps").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("configmaps").Names("cluster-info").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addNamespaceRoleBinding(metav1.NamespacePublic,
|
||||
rbac.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
|
||||
rbacv1helpers.NewRoleBinding(saRolePrefix+"bootstrap-signer", metav1.NamespacePublic).SAs(metav1.NamespaceSystem, "bootstrap-signer").BindingOrDie())
|
||||
|
||||
}
|
||||
|
||||
// NamespaceRoles returns a map of namespace to slice of roles to create
|
||||
func NamespaceRoles() map[string][]rbac.Role {
|
||||
func NamespaceRoles() map[string][]rbacv1.Role {
|
||||
return namespaceRoles
|
||||
}
|
||||
|
||||
// NamespaceRoleBindings returns a map of namespace to slice of roles to create
|
||||
func NamespaceRoleBindings() map[string][]rbac.RoleBinding {
|
||||
func NamespaceRoleBindings() map[string][]rbacv1.RoleBinding {
|
||||
return namespaceRoleBindings
|
||||
}
|
||||
|
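Note: NamespaceRoles and NamespaceRoleBindings above expose the bootstrap namespace policy as maps keyed by namespace. The short consumer sketch below is not part of the vendored diff; it is illustrative only and assumes fmt is imported in the calling file.

// Sketch only (not in the vendored diff): walking the bootstrap namespace policy.
func dumpNamespacePolicySketch() {
for ns, roles := range NamespaceRoles() {
for _, role := range roles {
fmt.Printf("namespace %s: role %s with %d rules\n", ns, role.Name, len(role.Rules))
}
}
for ns, bindings := range NamespaceRoleBindings() {
for _, rb := range bindings {
fmt.Printf("namespace %s: rolebinding %s -> role %s\n", ns, rb.Name, rb.RoleRef.Name)
}
}
}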
320
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
generated
vendored
320
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go
generated
vendored
@ -17,12 +17,13 @@ limitations under the License.
package bootstrappolicy

import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/authentication/user"
utilfeature "k8s.io/apiserver/pkg/util/feature"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
"k8s.io/kubernetes/pkg/features"
)

@ -32,7 +33,7 @@ var (
ReadUpdate = []string{"get", "list", "watch", "update", "patch"}

Label = map[string]string{"kubernetes.io/bootstrapping": "rbac-defaults"}
Annotation = map[string]string{rbac.AutoUpdateAnnotationKey: "true"}
Annotation = map[string]string{rbacv1.AutoUpdateAnnotationKey: "true"}
)

const (
@ -78,105 +79,105 @@ func addDefaultMetadata(obj runtime.Object) {
metadata.SetAnnotations(annotations)
}

func addClusterRoleLabel(roles []rbac.ClusterRole) {
func addClusterRoleLabel(roles []rbacv1.ClusterRole) {
for i := range roles {
addDefaultMetadata(&roles[i])
}
return
}

func addClusterRoleBindingLabel(rolebindings []rbac.ClusterRoleBinding) {
func addClusterRoleBindingLabel(rolebindings []rbacv1.ClusterRoleBinding) {
for i := range rolebindings {
addDefaultMetadata(&rolebindings[i])
}
return
}

func NodeRules() []rbac.PolicyRule {
|
||||
nodePolicyRules := []rbac.PolicyRule{
|
||||
func NodeRules() []rbacv1.PolicyRule {
|
||||
nodePolicyRules := []rbacv1.PolicyRule{
|
||||
// Needed to check API access. These creates are non-mutating
|
||||
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews", "localsubjectaccessreviews").RuleOrDie(),
|
||||
|
||||
// Needed to build serviceLister, to populate env vars for services
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services").RuleOrDie(),
|
||||
|
||||
// Nodes can register Node API objects and report status.
|
||||
// Use the NodeRestriction admission plugin to limit a node to creating/updating its own API object.
|
||||
rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
rbac.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update", "patch", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
|
||||
// TODO: restrict to the bound node as creator in the NodeRestrictions admission plugin
|
||||
rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
|
||||
|
||||
// TODO: restrict to pods scheduled on the bound node once field selectors are supported by list/watch authorization
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
|
||||
// Needed for the node to create/delete mirror pods.
|
||||
// Use the NodeRestriction admission plugin to limit a node to creating/deleting mirror pods bound to itself.
|
||||
rbac.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
// Needed for the node to report status of pods it is running.
|
||||
// Use the NodeRestriction admission plugin to limit a node to updating status of pods bound to itself.
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||
// Needed for the node to create pod evictions.
|
||||
// Use the NodeRestriction admission plugin to limit a node to creating evictions for pods bound to itself.
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/eviction").RuleOrDie(),
|
||||
|
||||
// Needed for imagepullsecrets, rbd/ceph and secret volumes, and secrets in envs
|
||||
// Needed for configmap volume and envs
|
||||
// Use the Node authorization mode to limit a node to get secrets/configmaps referenced by pods bound to itself.
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("secrets", "configmaps").RuleOrDie(),
|
||||
// Needed for persistent volumes
|
||||
// Use the Node authorization mode to limit a node to get pv/pvc objects referenced by pods bound to itself.
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
|
||||
|
||||
// TODO: add to the Node authorizer and restrict to endpoints referenced by pods or PVs bound to the node
|
||||
// Needed for glusterfs volumes
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
// Used to create a certificatesigningrequest for a node-specific client certificate, and watch
|
||||
// for it to be signed. This allows the kubelet to rotate its own certificate.
|
||||
rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
}

    if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
        // Use the Node authorization mode to limit a node to update status of pvc objects referenced by pods bound to itself.
        // Use the NodeRestriction admission plugin to limit a node to just update the status stanza.
        pvcStatusPolicyRule := rbac.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie()
        pvcStatusPolicyRule := rbacv1helpers.NewRule("get", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims/status").RuleOrDie()
        nodePolicyRules = append(nodePolicyRules, pvcStatusPolicyRule)
    }

    if utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) {
        // Use the Node authorization to limit a node to create tokens for service accounts running on that node
        // Use the NodeRestriction admission plugin to limit a node to create tokens bound to pods on that node
        tokenRequestRule := rbac.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie()
        tokenRequestRule := rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie()
        nodePolicyRules = append(nodePolicyRules, tokenRequestRule)
    }

    // CSI
    if utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) {
        volAttachRule := rbac.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()
        volAttachRule := rbacv1helpers.NewRule("get").Groups(storageGroup).Resources("volumeattachments").RuleOrDie()
        nodePolicyRules = append(nodePolicyRules, volAttachRule)
    }
    return nodePolicyRules
}
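For orientation, the rbacv1helpers.NewRule builder chained throughout NodeRules() evaluates to a plain rbacv1.PolicyRule. A minimal standalone sketch of that equivalence (illustrative only, not part of the vendored file; it assumes just the two rbac packages already imported by policy.go):

package main

import (
    "fmt"
    "reflect"

    rbacv1 "k8s.io/api/rbac/v1"
    rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
)

func main() {
    // Built form, as used for the CSIPersistentVolume feature gate above.
    built := rbacv1helpers.NewRule("get").Groups("storage.k8s.io").Resources("volumeattachments").RuleOrDie()

    // Hand-written equivalent of the same rule.
    literal := rbacv1.PolicyRule{
        Verbs:     []string{"get"},
        APIGroups: []string{"storage.k8s.io"},
        Resources: []string{"volumeattachments"},
    }

    fmt.Println(reflect.DeepEqual(built, literal)) // expected: true
}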
|
||||
// ClusterRoles returns the cluster roles to bootstrap an API server with
|
||||
func ClusterRoles() []rbac.ClusterRole {
|
||||
roles := []rbac.ClusterRole{
|
||||
func ClusterRoles() []rbacv1.ClusterRole {
|
||||
roles := []rbacv1.ClusterRole{
|
||||
{
|
||||
// a "root" role which can do absolutely anything
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbac.NewRule("*").URLs("*").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("*").URLs("*").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role which provides just enough power to determine if the server is ready and discover API versions for negotiation
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get").URLs(
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get").URLs(
|
||||
"/healthz", "/version", "/version/",
|
||||
// remove once swagger 1.2 support is removed
|
||||
"/swaggerapi", "/swaggerapi/*",
|
||||
@ -192,16 +193,16 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
{
|
||||
// a role which provides minimal resource access to allow a "normal" user to learn information about themselves
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// TODO add future selfsubjectrulesreview, project request APIs, project listing APIs
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews", "selfsubjectrulesreviews").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
// a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "admin"},
|
||||
AggregationRule: &rbac.AggregationRule{
|
||||
AggregationRule: &rbacv1.AggregationRule{
|
||||
ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}}},
|
||||
},
|
||||
},
|
||||
@ -210,7 +211,7 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
// It does not grant powers for "privileged" resources which are domain of the system: `/status`
|
||||
// subresources or `quota`/`limits` which are used to control namespaces
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "edit"},
|
||||
AggregationRule: &rbac.AggregationRule{
|
||||
AggregationRule: &rbacv1.AggregationRule{
|
||||
ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}}},
|
||||
},
|
||||
},
|
||||
@ -218,45 +219,46 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
|
||||
// a namespace.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "view"},
|
||||
AggregationRule: &rbac.AggregationRule{
|
||||
AggregationRule: &rbacv1.AggregationRule{
|
||||
ClusterRoleSelectors: []metav1.LabelSelector{{MatchLabels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-admin", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-admin": "true"}},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
"services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
|
||||
"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
|
||||
// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
|
||||
// indicator of which namespaces you have access to.
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets",
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(appsGroup).Resources(
|
||||
"statefulsets", "statefulsets/scale",
|
||||
"daemonsets",
|
||||
"deployments", "deployments/scale", "deployments/rollback",
|
||||
"replicasets", "replicasets/scale").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
|
||||
"deployments", "deployments/scale", "deployments/rollback", "ingresses",
|
||||
"replicasets", "replicasets/scale", "replicationcontrollers/scale",
|
||||
"networkpolicies").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
|
||||
|
||||
// additional admin powers
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(rbacGroup).Resources("roles", "rolebindings").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -264,73 +266,75 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
// It does not grant powers for "privileged" resources which are domain of the system: `/status`
|
||||
// subresources or `quota`/`limits` which are used to control namespaces
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-edit", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-edit": "true"}},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
"services", "services/proxy", "endpoints", "persistentvolumeclaims", "configmaps", "secrets").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
|
||||
"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
|
||||
// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
|
||||
// indicator of which namespaces you have access to.
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbac.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("impersonate").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(appsGroup).Resources("statefulsets",
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(appsGroup).Resources(
|
||||
"statefulsets", "statefulsets/scale",
|
||||
"daemonsets",
|
||||
"deployments", "deployments/scale", "deployments/rollback",
|
||||
"replicasets", "replicasets/scale").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets",
|
||||
"deployments", "deployments/scale", "deployments/rollback", "ingresses",
|
||||
"replicasets", "replicasets/scale", "replicationcontrollers/scale",
|
||||
"networkpolicies").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(ReadWrite...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
|
||||
// a namespace.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:aggregate-to-view", Labels: map[string]string{"rbac.authorization.k8s.io/aggregate-to-view": "true"}},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
"services", "endpoints", "persistentvolumeclaims", "configmaps").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("limitranges", "resourcequotas", "bindings", "events",
|
||||
"pods/status", "resourcequotas/status", "namespaces/status", "replicationcontrollers/status", "pods/log").RuleOrDie(),
|
||||
// read access to namespaces at the namespace scope means you can read *this* namespace. This can be used as an
|
||||
// indicator of which namespaces you have access to.
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets",
|
||||
rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources(
|
||||
"statefulsets", "statefulsets/scale",
|
||||
"daemonsets",
|
||||
"deployments", "deployments/scale",
|
||||
"replicasets", "replicasets/scale").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(autoscalingGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
|
||||
rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
|
||||
"ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale",
|
||||
"networkpolicies").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(networkingGroup).Resources("networkpolicies").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for heapster's connections back to the API server
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(extensionsGroup).Resources("deployments").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -342,19 +346,19 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
// a role to use for node-problem-detector access. It does not get bound to default location since
|
||||
// deployment locations can reasonably vary.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node-problem-detector"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for setting up a proxy
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// Used to build serviceLister
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
|
||||
eventsRule(),
|
||||
},
|
||||
@ -362,142 +366,156 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
{
|
||||
// a role to use for full access to the kubelet API
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kubelet-api-admin"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// Allow read-only access to the Node API objects
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
// Allow all API calls to the nodes
|
||||
rbac.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("proxy").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("*").Groups(legacyGroup).Resources("nodes/proxy", "nodes/metrics", "nodes/spec", "nodes/stats", "nodes/log").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for bootstrapping a node's client certificates
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// used to create a certificatesigningrequest for a node-specific client certificate, and watch for it to be signed
|
||||
rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for allowing authentication and authorization delegation
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:auth-delegator"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// These creates are non-mutating
|
||||
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for the API registry, summarization, and proxy handling
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-aggregator"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
// it needs to see all services so that it knows whether the ones it points to exist or not
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for bootstrapping the kube-controller-manager so it can create the shared informers
|
||||
// service accounts, and secrets that we need to create separate identities for other controllers
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
eventsRule(),
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
rbac.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
// Needed to check API access. These creates are non-mutating
|
||||
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
// Needed for all shared informers
|
||||
rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for the kube-scheduler
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
eventsRule(),
|
||||
|
||||
// this is for leaderlease access
|
||||
// TODO: scope this to the kube-system namespace
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
rbac.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),
|
||||
|
||||
// fundamental resources
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
|
||||
rbac.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
|
||||
// things that select pods
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
// things that pods use or applies to them
|
||||
rbac.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for the kube-dns pod
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-dns"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("list", "watch").Groups(legacyGroup).Resources("endpoints", "services").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role for an external/out-of-tree persistent volume provisioner
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:persistent-volume-provisioner"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
// update is needed in addition to read access for setting lock annotations on PVCs
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
|
||||
|
||||
// Needed for watching provisioning success and failure events
|
||||
rbac.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("watch").Groups(legacyGroup).Resources("events").RuleOrDie(),
|
||||
|
||||
eventsRule(),
|
||||
},
|
||||
},
|
||||
        {
            // a role for the csi external provisioner
            ObjectMeta: metav1.ObjectMeta{Name: "system:csi-external-provisioner"},
            Rules: []rbacv1.PolicyRule{
                rbacv1helpers.NewRule("create", "delete", "get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
                rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
                rbacv1helpers.NewRule("list", "watch").Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
                rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
            },
        },
        {
            // a role for the csi external attacher
            ObjectMeta: metav1.ObjectMeta{Name: "system:csi-external-attacher"},
            Rules: []rbacv1.PolicyRule{
                rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
                rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
                rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(storageGroup).Resources("volumeattachments").RuleOrDie(),
                rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),
            },
        },
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:aws-cloud-provider"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "patch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role making the csrapprover controller approve a node client CSR
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/nodeclient").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role making the csrapprover controller approve a node client CSR requested by the node itself
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeclient"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(),
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeclient").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
|
||||
roles = append(roles, rbac.ClusterRole{
|
||||
// a role making the csrapprover controller approve a node server CSR requested by the node itself
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:certificates.k8s.io:certificatesigningrequests:selfnodeserver"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("create").Groups(certificatesGroup).Resources("certificatesigningrequests/selfnodeserver").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
    if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
        roles = append(roles, rbac.ClusterRole{
        rules := []rbacv1.PolicyRule{
            rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
            rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
        }
        if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
            rules = append(rules, rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie())
        }
        roles = append(roles, rbacv1.ClusterRole{
            ObjectMeta: metav1.ObjectMeta{Name: "system:volume-scheduler"},
            Rules: []rbac.PolicyRule{
                rbac.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
                rbac.NewRule(Read...).Groups(storageGroup).Resources("storageclasses").RuleOrDie(),
            },
            Rules: rules,
        })
    }

@ -508,27 +526,27 @@ func ClusterRoles() []rbac.ClusterRole {
|
||||
const systemNodeRoleName = "system:node"
|
||||
|
||||
// ClusterRoleBindings return default rolebindings to the default roles
|
||||
func ClusterRoleBindings() []rbac.ClusterRoleBinding {
|
||||
rolebindings := []rbac.ClusterRoleBinding{
|
||||
rbac.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
|
||||
rbac.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(),
|
||||
func ClusterRoleBindings() []rbacv1.ClusterRoleBinding {
|
||||
rolebindings := []rbacv1.ClusterRoleBinding{
|
||||
rbacv1helpers.NewClusterBinding("cluster-admin").Groups(user.SystemPrivilegedGroup).BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:discovery").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:basic-user").Groups(user.AllAuthenticated, user.AllUnauthenticated).BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:kube-dns").SAs("kube-system", "kube-dns").BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
|
||||
rbacv1helpers.NewClusterBinding("system:aws-cloud-provider").SAs("kube-system", "aws-cloud-provider").BindingOrDie(),
|
||||
|
||||
// This default binding of the system:node role to the system:nodes group is deprecated in 1.7 with the availability of the Node authorizer.
|
||||
// This leaves the binding, but with an empty set of subjects, so that tightening reconciliation can remove the subject.
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: systemNodeRoleName},
|
||||
RoleRef: rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName},
|
||||
RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: systemNodeRoleName},
|
||||
},
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
|
||||
rolebindings = append(rolebindings, rbac.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie())
|
||||
rolebindings = append(rolebindings, rbacv1helpers.NewClusterBinding("system:volume-scheduler").Users(user.KubeScheduler).BindingOrDie())
|
||||
}
|
||||
|
||||
addClusterRoleBindingLabel(rolebindings)
|
||||
|
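The two CSI roles added above end up in the slice returned by ClusterRoles(), alongside the existing bootstrap roles. A rough usage sketch (hypothetical, outside the vendored tree) that lists them:

package main

import (
    "fmt"

    "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

func main() {
    // Walk the bootstrap cluster roles and report the CSI-related ones.
    for _, role := range bootstrappolicy.ClusterRoles() {
        switch role.Name {
        case "system:csi-external-provisioner", "system:csi-external-attacher":
            fmt.Printf("%s grants %d rules\n", role.Name, len(role.Rules))
        }
    }
}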
42
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy_test.go
generated
vendored
@ -34,8 +34,8 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
_ "k8s.io/kubernetes/pkg/apis/core/install"
|
||||
"k8s.io/kubernetes/pkg/apis/rbac"
|
||||
_ "k8s.io/kubernetes/pkg/apis/rbac/install"
|
||||
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
|
||||
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
|
||||
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
|
||||
)
|
||||
@ -43,12 +43,12 @@ import (
|
||||
// semanticRoles is a few enumerated roles for which the relationships are well established
|
||||
// and we want to maintain symmetric roles
|
||||
type semanticRoles struct {
|
||||
admin *rbac.ClusterRole
|
||||
edit *rbac.ClusterRole
|
||||
view *rbac.ClusterRole
|
||||
admin *rbacv1.ClusterRole
|
||||
edit *rbacv1.ClusterRole
|
||||
view *rbacv1.ClusterRole
|
||||
}
|
||||
|
||||
func getSemanticRoles(roles []rbac.ClusterRole) semanticRoles {
|
||||
func getSemanticRoles(roles []rbacv1.ClusterRole) semanticRoles {
|
||||
ret := semanticRoles{}
|
||||
for i := range roles {
|
||||
role := roles[i]
|
||||
@ -81,10 +81,10 @@ func TestCovers(t *testing.T) {
|
||||
|
||||
// additionalAdminPowers is the list of powers that we expect to be different than the editor role.
|
||||
// one resource per rule to make the "does not already contain" check easy
|
||||
var additionalAdminPowers = []rbac.PolicyRule{
|
||||
rbac.NewRule("create").Groups("authorization.k8s.io").Resources("localsubjectaccessreviews").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("roles").RuleOrDie(),
|
||||
var additionalAdminPowers = []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("create").Groups("authorization.k8s.io").Resources("localsubjectaccessreviews").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("rolebindings").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.ReadWrite...).Groups("rbac.authorization.k8s.io").Resources("roles").RuleOrDie(),
|
||||
}
|
||||
|
||||
func TestAdminEditRelationship(t *testing.T) {
|
||||
@ -92,7 +92,7 @@ func TestAdminEditRelationship(t *testing.T) {
|
||||
|
||||
// confirm that the edit role doesn't already have extra powers
|
||||
for _, rule := range additionalAdminPowers {
|
||||
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.edit.Rules, []rbac.PolicyRule{rule}); covers {
|
||||
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.edit.Rules, []rbacv1.PolicyRule{rule}); covers {
|
||||
t.Errorf("edit has extra powers: %#v", rule)
|
||||
}
|
||||
}
|
||||
@ -109,19 +109,19 @@ func TestAdminEditRelationship(t *testing.T) {
|
||||
|
||||
// viewEscalatingNamespaceResources is the list of rules that would allow privilege escalation attacks based on
|
||||
// ability to view (GET) them
|
||||
var viewEscalatingNamespaceResources = []rbac.PolicyRule{
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/attach").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/proxy").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/exec").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/portforward").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("secrets").RuleOrDie(),
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("").Resources("services/proxy").RuleOrDie(),
|
||||
var viewEscalatingNamespaceResources = []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/attach").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/proxy").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/exec").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("pods/portforward").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("secrets").RuleOrDie(),
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("").Resources("services/proxy").RuleOrDie(),
|
||||
}
|
||||
|
||||
// ungettableResources is the list of rules that don't allow to view (GET) them
|
||||
// this is purposefully separate list to distinguish from escalating privs
|
||||
var ungettableResources = []rbac.PolicyRule{
|
||||
rbac.NewRule(bootstrappolicy.Read...).Groups("apps", "extensions").Resources("deployments/rollback").RuleOrDie(),
|
||||
var ungettableResources = []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule(bootstrappolicy.Read...).Groups("apps", "extensions").Resources("deployments/rollback").RuleOrDie(),
|
||||
}
|
||||
|
||||
func TestEditViewRelationship(t *testing.T) {
|
||||
@ -143,7 +143,7 @@ func TestEditViewRelationship(t *testing.T) {
|
||||
|
||||
// confirm that the view role doesn't already have extra powers
|
||||
for _, rule := range viewEscalatingNamespaceResources {
|
||||
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbac.PolicyRule{rule}); covers {
|
||||
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbacv1.PolicyRule{rule}); covers {
|
||||
t.Errorf("view has extra powers: %#v", rule)
|
||||
}
|
||||
}
|
||||
@ -151,7 +151,7 @@ func TestEditViewRelationship(t *testing.T) {
|
||||
|
||||
// confirm that the view role doesn't have ungettable resources
|
||||
for _, rule := range ungettableResources {
|
||||
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbac.PolicyRule{rule}); covers {
|
||||
if covers, _ := rbacregistryvalidation.Covers(semanticRoles.view.Rules, []rbacv1.PolicyRule{rule}); covers {
|
||||
t.Errorf("view has ungettable resource: %#v", rule)
|
||||
}
|
||||
}
|
||||
|
@ -138,7 +138,6 @@ items:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:node
|
||||
subjects: null
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
|
@ -137,6 +137,7 @@ items:
|
||||
- replicasets
|
||||
- replicasets/scale
|
||||
- statefulsets
|
||||
- statefulsets/scale
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
@ -329,6 +330,7 @@ items:
|
||||
- replicasets
|
||||
- replicasets/scale
|
||||
- statefulsets
|
||||
- statefulsets/scale
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
@ -471,6 +473,7 @@ items:
|
||||
- replicasets
|
||||
- replicasets/scale
|
||||
- statefulsets
|
||||
- statefulsets/scale
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
@ -619,6 +622,103 @@ items:
|
||||
- certificatesigningrequests/selfnodeclient
|
||||
verbs:
|
||||
- create
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:csi-external-attacher
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
- volumeattachments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:csi-external-provisioner
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
@ -999,6 +1099,7 @@ items:
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
@ -1013,6 +1114,8 @@ items:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
@ -1035,6 +1138,14 @@ items:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
|
@ -136,6 +136,23 @@ items:
|
||||
- kind: ServiceAccount
|
||||
name: endpoint-controller
|
||||
namespace: kube-system
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:controller:expand-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:controller:expand-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: expand-controller
|
||||
namespace: kube-system
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
|
@ -239,6 +239,7 @@ items:
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
@ -424,6 +425,70 @@ items:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:controller:expand-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims/status
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
|
42
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac.go
generated
vendored
@ -18,18 +18,18 @@ limitations under the License.
package rbac

import (
    "bytes"
    "fmt"

    "github.com/golang/glog"

    "bytes"

    rbacv1 "k8s.io/api/rbac/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/kubernetes/pkg/apis/rbac"
    rbaclisters "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion"
    rbaclisters "k8s.io/client-go/listers/rbac/v1"
    rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
    rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)

@ -38,12 +38,12 @@ type RequestToRuleMapper interface {
|
||||
// Any rule returned is still valid, since rules are deny by default. If you can pass with the rules
|
||||
// supplied, you do not have to fail the request. If you cannot, you should indicate the error along
|
||||
// with your denial.
|
||||
RulesFor(subject user.Info, namespace string) ([]rbac.PolicyRule, error)
|
||||
RulesFor(subject user.Info, namespace string) ([]rbacv1.PolicyRule, error)
|
||||
|
||||
// VisitRulesFor invokes visitor() with each rule that applies to a given user in a given namespace,
|
||||
// and each error encountered resolving those rules. Rule may be nil if err is non-nil.
|
||||
// If visitor() returns false, visiting is short-circuited.
|
||||
VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool)
|
||||
VisitRulesFor(user user.Info, namespace string, visitor func(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool)
|
||||
}
|
||||
|
||||
type RBACAuthorizer struct {
|
||||
@ -59,10 +59,10 @@ type authorizingVisitor struct {
|
||||
errors []error
|
||||
}
|
||||
|
||||
func (v *authorizingVisitor) visit(source fmt.Stringer, rule *rbac.PolicyRule, err error) bool {
|
||||
func (v *authorizingVisitor) visit(source fmt.Stringer, rule *rbacv1.PolicyRule, err error) bool {
|
||||
if rule != nil && RuleAllows(v.requestAttributes, rule) {
|
||||
v.allowed = true
|
||||
v.reason = fmt.Sprintf("allowed by %s", source.String())
|
||||
v.reason = fmt.Sprintf("RBAC: allowed by %s", source.String())
|
||||
return false
|
||||
}
|
||||
if err != nil {
|
||||
@ -120,7 +120,7 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (aut
|
||||
|
||||
reason := ""
|
||||
if len(ruleCheckingVisitor.errors) > 0 {
|
||||
reason = fmt.Sprintf("%v", utilerrors.NewAggregate(ruleCheckingVisitor.errors))
|
||||
reason = fmt.Sprintf("RBAC: %v", utilerrors.NewAggregate(ruleCheckingVisitor.errors))
|
||||
}
|
||||
return authorizer.DecisionNoOpinion, reason, nil
|
||||
}
|
||||
@ -164,7 +164,7 @@ func New(roles rbacregistryvalidation.RoleGetter, roleBindings rbacregistryvalid
|
||||
return authorizer
|
||||
}
|
||||
|
||||
func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRule) bool {
|
||||
func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbacv1.PolicyRule) bool {
|
||||
for i := range rules {
|
||||
if RuleAllows(requestAttributes, &rules[i]) {
|
||||
return true
|
||||
@ -174,28 +174,28 @@ func RulesAllow(requestAttributes authorizer.Attributes, rules ...rbac.PolicyRul
    return false
}

func RuleAllows(requestAttributes authorizer.Attributes, rule *rbac.PolicyRule) bool {
func RuleAllows(requestAttributes authorizer.Attributes, rule *rbacv1.PolicyRule) bool {
    if requestAttributes.IsResourceRequest() {
        combinedResource := requestAttributes.GetResource()
        if len(requestAttributes.GetSubresource()) > 0 {
            combinedResource = requestAttributes.GetResource() + "/" + requestAttributes.GetSubresource()
        }

        return rbac.VerbMatches(rule, requestAttributes.GetVerb()) &&
            rbac.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) &&
            rbac.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) &&
            rbac.ResourceNameMatches(rule, requestAttributes.GetName())
        return rbacv1helpers.VerbMatches(rule, requestAttributes.GetVerb()) &&
            rbacv1helpers.APIGroupMatches(rule, requestAttributes.GetAPIGroup()) &&
            rbacv1helpers.ResourceMatches(rule, combinedResource, requestAttributes.GetSubresource()) &&
            rbacv1helpers.ResourceNameMatches(rule, requestAttributes.GetName())
    }

    return rbac.VerbMatches(rule, requestAttributes.GetVerb()) &&
        rbac.NonResourceURLMatches(rule, requestAttributes.GetPath())
    return rbacv1helpers.VerbMatches(rule, requestAttributes.GetVerb()) &&
        rbacv1helpers.NonResourceURLMatches(rule, requestAttributes.GetPath())
}

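RuleAllows (and the RulesAllow wrapper above it) now matches requests against v1 PolicyRules via the rbacv1helpers matchers. A hedged usage sketch, assuming authorizer.AttributesRecord from k8s.io/apiserver supplies the request side and that the hypothetical attachment name is purely illustrative:

package main

import (
    "fmt"

    "k8s.io/apiserver/pkg/authorization/authorizer"
    rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
    "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
)

func main() {
    // The kind of rule the bootstrap policy grants to nodes for CSI.
    rule := rbacv1helpers.NewRule("get").Groups("storage.k8s.io").Resources("volumeattachments").RuleOrDie()

    // A resource request: GET a volumeattachment in the storage.k8s.io group.
    attrs := authorizer.AttributesRecord{
        Verb:            "get",
        APIGroup:        "storage.k8s.io",
        Resource:        "volumeattachments",
        Name:            "my-attachment",
        ResourceRequest: true,
    }

    // Verb, API group, resource, and (unrestricted) resource name all match.
    fmt.Println(rbac.RulesAllow(attrs, rule)) // expected: true
}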
type RoleGetter struct {
|
||||
Lister rbaclisters.RoleLister
|
||||
}
|
||||
|
||||
func (g *RoleGetter) GetRole(namespace, name string) (*rbac.Role, error) {
|
||||
func (g *RoleGetter) GetRole(namespace, name string) (*rbacv1.Role, error) {
|
||||
return g.Lister.Roles(namespace).Get(name)
|
||||
}
|
||||
|
||||
@ -203,7 +203,7 @@ type RoleBindingLister struct {
|
||||
Lister rbaclisters.RoleBindingLister
|
||||
}
|
||||
|
||||
func (l *RoleBindingLister) ListRoleBindings(namespace string) ([]*rbac.RoleBinding, error) {
|
||||
func (l *RoleBindingLister) ListRoleBindings(namespace string) ([]*rbacv1.RoleBinding, error) {
|
||||
return l.Lister.RoleBindings(namespace).List(labels.Everything())
|
||||
}
|
||||
|
||||
@ -211,7 +211,7 @@ type ClusterRoleGetter struct {
|
||||
Lister rbaclisters.ClusterRoleLister
|
||||
}
|
||||
|
||||
func (g *ClusterRoleGetter) GetClusterRole(name string) (*rbac.ClusterRole, error) {
|
||||
func (g *ClusterRoleGetter) GetClusterRole(name string) (*rbacv1.ClusterRole, error) {
|
||||
return g.Lister.Get(name)
|
||||
}
|
||||
|
||||
@ -219,6 +219,6 @@ type ClusterRoleBindingLister struct {
|
||||
Lister rbaclisters.ClusterRoleBindingLister
|
||||
}
|
||||
|
||||
func (l *ClusterRoleBindingLister) ListClusterRoleBindings() ([]*rbac.ClusterRoleBinding, error) {
|
||||
func (l *ClusterRoleBindingLister) ListClusterRoleBindings() ([]*rbacv1.ClusterRoleBinding, error) {
|
||||
return l.Lister.List(labels.Everything())
|
||||
}
|
||||
|
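Note: the hunks above only swap the internal rbac API types for the public rbacv1 types and the rbacv1helpers matchers; the matching semantics stay the same. As a rough, self-contained sketch of that wildcard matching (simplified stand-in types, not the vendored helpers — the real ResourceMatches also understands "*/subresource" forms and resource-name restrictions):

package main

import "fmt"

// policyRule is a simplified stand-in for rbacv1.PolicyRule.
type policyRule struct {
	Verbs, APIGroups, Resources []string
}

// matches reports whether want is listed explicitly or covered by the "*"
// wildcard, which is the core of the VerbMatches/APIGroupMatches helpers.
func matches(list []string, want string) bool {
	for _, item := range list {
		if item == "*" || item == want {
			return true
		}
	}
	return false
}

// ruleAllows mirrors the resource branch of RuleAllows: verb, API group and
// the combined "resource/subresource" name must all be covered by the rule.
func ruleAllows(rule policyRule, verb, apiGroup, resource, subresource string) bool {
	combined := resource
	if subresource != "" {
		combined = resource + "/" + subresource
	}
	return matches(rule.Verbs, verb) &&
		matches(rule.APIGroups, apiGroup) &&
		matches(rule.Resources, combined)
}

func main() {
	podsAdmin := policyRule{Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"pods", "pods/status"}}
	fmt.Println(ruleAllows(podsAdmin, "get", "", "pods", ""))       // true
	fmt.Println(ruleAllows(podsAdmin, "get", "", "pods", "status")) // true
	fmt.Println(ruleAllows(podsAdmin, "get", "", "secrets", ""))    // false
}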
97
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac_test.go
generated
vendored
@ -21,16 +21,17 @@ import (
"strings"
"testing"

rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacv1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy"
)

func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRule {
return rbac.PolicyRule{
func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbacv1.PolicyRule {
return rbacv1.PolicyRule{
Verbs: strings.Split(verbs, ","),
APIGroups: strings.Split(apiGroups, ","),
Resources: strings.Split(resources, ","),
@ -38,12 +39,12 @@ func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRul
}
}

func newRole(name, namespace string, rules ...rbac.PolicyRule) *rbac.Role {
return &rbac.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules}
func newRole(name, namespace string, rules ...rbacv1.PolicyRule) *rbacv1.Role {
return &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules}
}

func newClusterRole(name string, rules ...rbac.PolicyRule) *rbac.ClusterRole {
return &rbac.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules}
func newClusterRole(name string, rules ...rbacv1.PolicyRule) *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules}
}

const (
@ -51,26 +52,26 @@ const (
bindToClusterRole uint16 = 0x1
)

func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRoleBinding {
r := &rbac.ClusterRoleBinding{
func newClusterRoleBinding(roleName string, subjects ...string) *rbacv1.ClusterRoleBinding {
r := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{},
RoleRef: rbac.RoleRef{
APIGroup: rbac.GroupName,
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole", // ClusterRoleBindings can only refer to ClusterRole
Name: roleName,
},
}

r.Subjects = make([]rbac.Subject, len(subjects))
r.Subjects = make([]rbacv1.Subject, len(subjects))
for i, subject := range subjects {
split := strings.SplitN(subject, ":", 2)
r.Subjects[i].Kind, r.Subjects[i].Name = split[0], split[1]

switch r.Subjects[i].Kind {
case rbac.ServiceAccountKind:
case rbacv1.ServiceAccountKind:
r.Subjects[i].APIGroup = ""
case rbac.UserKind, rbac.GroupKind:
r.Subjects[i].APIGroup = rbac.GroupName
case rbacv1.UserKind, rbacv1.GroupKind:
r.Subjects[i].APIGroup = rbacv1.GroupName
default:
panic(fmt.Errorf("invalid kind %s", r.Subjects[i].Kind))
}
@ -78,26 +79,26 @@ func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRol
return r
}

func newRoleBinding(namespace, roleName string, bindType uint16, subjects ...string) *rbac.RoleBinding {
r := &rbac.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}}
func newRoleBinding(namespace, roleName string, bindType uint16, subjects ...string) *rbacv1.RoleBinding {
r := &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}}

switch bindType {
case bindToRole:
r.RoleRef = rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "Role", Name: roleName}
r.RoleRef = rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "Role", Name: roleName}
case bindToClusterRole:
r.RoleRef = rbac.RoleRef{APIGroup: rbac.GroupName, Kind: "ClusterRole", Name: roleName}
r.RoleRef = rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: roleName}
}

r.Subjects = make([]rbac.Subject, len(subjects))
r.Subjects = make([]rbacv1.Subject, len(subjects))
for i, subject := range subjects {
split := strings.SplitN(subject, ":", 2)
r.Subjects[i].Kind, r.Subjects[i].Name = split[0], split[1]

switch r.Subjects[i].Kind {
case rbac.ServiceAccountKind:
case rbacv1.ServiceAccountKind:
r.Subjects[i].APIGroup = ""
case rbac.UserKind, rbac.GroupKind:
r.Subjects[i].APIGroup = rbac.GroupName
case rbacv1.UserKind, rbacv1.GroupKind:
r.Subjects[i].APIGroup = rbacv1.GroupName
default:
panic(fmt.Errorf("invalid kind %s", r.Subjects[i].Kind))
}
@ -136,19 +137,19 @@ func (d *defaultAttributes) GetPath() string { return "" }

func TestAuthorizer(t *testing.T) {
tests := []struct {
roles []*rbac.Role
roleBindings []*rbac.RoleBinding
clusterRoles []*rbac.ClusterRole
clusterRoleBindings []*rbac.ClusterRoleBinding
roles []*rbacv1.Role
roleBindings []*rbacv1.RoleBinding
clusterRoles []*rbacv1.ClusterRole
clusterRoleBindings []*rbacv1.ClusterRoleBinding

shouldPass []authorizer.Attributes
shouldFail []authorizer.Attributes
}{
{
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "*", "*")),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
shouldPass: []authorizer.Attributes{
@ -167,12 +168,12 @@ func TestAuthorizer(t *testing.T) {
},
{
// Non-resource-url tests
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("non-resource-url-getter", newRule("get", "", "", "/apis")),
newClusterRole("non-resource-url", newRule("*", "", "", "/apis")),
newClusterRole("non-resource-url-prefix", newRule("get", "", "", "/apis/*")),
},
clusterRoleBindings: []*rbac.ClusterRoleBinding{
clusterRoleBindings: []*rbacv1.ClusterRoleBinding{
newClusterRoleBinding("non-resource-url-getter", "User:foo", "Group:bar"),
newClusterRoleBinding("non-resource-url", "User:admin", "Group:admin"),
newClusterRoleBinding("non-resource-url-prefix", "User:prefixed", "Group:prefixed"),
@ -208,10 +209,10 @@ func TestAuthorizer(t *testing.T) {
},
{
// test subresource resolution
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "pods", "*")),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
shouldPass: []authorizer.Attributes{
@ -223,13 +224,13 @@ func TestAuthorizer(t *testing.T) {
},
{
// test subresource resolution
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin",
newRule("*", "*", "pods/status", "*"),
newRule("*", "*", "*/scale", "*"),
),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
shouldPass: []authorizer.Attributes{
@ -263,13 +264,13 @@ func TestAuthorizer(t *testing.T) {
func TestRuleMatches(t *testing.T) {
tests := []struct {
name string
rule rbac.PolicyRule
rule rbacv1.PolicyRule

requestsToExpected map[authorizer.AttributesRecord]bool
}{
{
name: "star verb, exact match other",
rule: rbac.NewRule("*").Groups("group1").Resources("resource1").RuleOrDie(),
rule: rbacv1helpers.NewRule("*").Groups("group1").Resources("resource1").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): false,
@ -283,7 +284,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star group, exact match other",
rule: rbac.NewRule("verb1").Groups("*").Resources("resource1").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").Groups("*").Resources("resource1").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): true,
@ -297,7 +298,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star resource, exact match other",
rule: rbac.NewRule("verb1").Groups("group1").Resources("*").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").Groups("group1").Resources("*").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): false,
@ -311,7 +312,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "tuple expansion",
rule: rbac.NewRule("verb1", "verb2").Groups("group1", "group2").Resources("resource1", "resource2").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1", "verb2").Groups("group1", "group2").Resources("resource1", "resource2").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").New(): true,
@ -325,7 +326,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "subresource expansion",
rule: rbac.NewRule("*").Groups("*").Resources("resource1/subresource1").RuleOrDie(),
rule: rbacv1helpers.NewRule("*").Groups("*").Resources("resource1/subresource1").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
resourceRequest("verb1").Group("group1").Resource("resource1").Subresource("subresource1").New(): true,
resourceRequest("verb1").Group("group2").Resource("resource1").Subresource("subresource2").New(): false,
@ -339,7 +340,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star nonresource, exact match other",
rule: rbac.NewRule("verb1").URLs("*").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").URLs("*").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
nonresourceRequest("verb1").URL("/foo").New(): true,
nonresourceRequest("verb1").URL("/foo/bar").New(): true,
@ -355,7 +356,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star nonresource subpath",
rule: rbac.NewRule("verb1").URLs("/foo/*").RuleOrDie(),
rule: rbacv1helpers.NewRule("verb1").URLs("/foo/*").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
nonresourceRequest("verb1").URL("/foo").New(): false,
nonresourceRequest("verb1").URL("/foo/bar").New(): true,
@ -371,7 +372,7 @@ func TestRuleMatches(t *testing.T) {
},
{
name: "star verb, exact nonresource",
rule: rbac.NewRule("*").URLs("/foo", "/foo/bar/one").RuleOrDie(),
rule: rbacv1helpers.NewRule("*").URLs("/foo", "/foo/bar/one").RuleOrDie(),
requestsToExpected: map[authorizer.AttributesRecord]bool{
nonresourceRequest("verb1").URL("/foo").New(): true,
nonresourceRequest("verb1").URL("/foo/bar").New(): false,
@ -441,19 +442,19 @@ func (r *requestAttributeBuilder) New() authorizer.AttributesRecord {
}

func BenchmarkAuthorize(b *testing.B) {
bootstrapRoles := []rbac.ClusterRole{}
bootstrapRoles := []rbacv1.ClusterRole{}
bootstrapRoles = append(bootstrapRoles, bootstrappolicy.ControllerRoles()...)
bootstrapRoles = append(bootstrapRoles, bootstrappolicy.ClusterRoles()...)

bootstrapBindings := []rbac.ClusterRoleBinding{}
bootstrapBindings := []rbacv1.ClusterRoleBinding{}
bootstrapBindings = append(bootstrapBindings, bootstrappolicy.ClusterRoleBindings()...)
bootstrapBindings = append(bootstrapBindings, bootstrappolicy.ControllerRoleBindings()...)

clusterRoles := []*rbac.ClusterRole{}
clusterRoles := []*rbacv1.ClusterRole{}
for i := range bootstrapRoles {
clusterRoles = append(clusterRoles, &bootstrapRoles[i])
}
clusterRoleBindings := []*rbac.ClusterRoleBinding{}
clusterRoleBindings := []*rbacv1.ClusterRoleBinding{}
for i := range bootstrapBindings {
clusterRoleBindings = append(clusterRoleBindings, &bootstrapBindings[i])
}
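For orientation, the test helpers above (newRule, newClusterRole, newClusterRoleBinding) now build plain k8s.io/api/rbac/v1 objects. A minimal illustrative sketch of what they end up constructing — not part of the vendored tests — might look like this:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Roughly what newClusterRole("admin", newRule("*", "*", "*", "*")) produces.
	admin := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "admin"},
		Rules: []rbacv1.PolicyRule{{
			Verbs:           []string{"*"},
			APIGroups:       []string{"*"},
			Resources:       []string{"*"},
			NonResourceURLs: []string{"*"},
		}},
	}

	// Roughly what newClusterRoleBinding("admin", "User:admin", "Group:admins") produces.
	binding := &rbacv1.ClusterRoleBinding{
		RoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: "ClusterRole", Name: admin.Name},
		Subjects: []rbacv1.Subject{
			{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"},
			{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"},
		},
	}

	fmt.Println(binding.RoleRef.Name, len(binding.Subjects))
}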
14
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/subject_locator.go
generated
vendored
@ -18,21 +18,21 @@ limitations under the License.
package rbac

import (
rbacv1 "k8s.io/api/rbac/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)

type RoleToRuleMapper interface {
// GetRoleReferenceRules attempts to resolve the role reference of a RoleBinding or ClusterRoleBinding. The passed namespace should be the namespace
// of the role binding, the empty string if a cluster role binding.
GetRoleReferenceRules(roleRef rbac.RoleRef, namespace string) ([]rbac.PolicyRule, error)
GetRoleReferenceRules(roleRef rbacv1.RoleRef, namespace string) ([]rbacv1.PolicyRule, error)
}

type SubjectLocator interface {
AllowedSubjects(attributes authorizer.Attributes) ([]rbac.Subject, error)
AllowedSubjects(attributes authorizer.Attributes) ([]rbacv1.Subject, error)
}

var _ = SubjectLocator(&SubjectAccessEvaluator{})
@ -59,10 +59,10 @@ func NewSubjectAccessEvaluator(roles rbacregistryvalidation.RoleGetter, roleBind

// AllowedSubjects returns the subjects that can perform an action and any errors encountered while computing the list.
// It is possible to have both subjects and errors returned if some rolebindings couldn't be resolved, but others could be.
func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.Attributes) ([]rbac.Subject, error) {
subjects := []rbac.Subject{{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup}}
func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.Attributes) ([]rbacv1.Subject, error) {
subjects := []rbacv1.Subject{{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup}}
if len(r.superUser) > 0 {
subjects = append(subjects, rbac.Subject{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: r.superUser})
subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: r.superUser})
}
errorlist := []error{}

@ -104,7 +104,7 @@ func (r *SubjectAccessEvaluator) AllowedSubjects(requestAttributes authorizer.At
}
}

dedupedSubjects := []rbac.Subject{}
dedupedSubjects := []rbacv1.Subject{}
for _, subject := range subjects {
found := false
for _, curr := range dedupedSubjects {
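The tail of AllowedSubjects shown above deduplicates the subjects it has accumulated with a simple linear scan. A standalone sketch of that pattern, using a simplified stand-in for rbacv1.Subject (not the vendored code):

package main

import "fmt"

// subject is a simplified stand-in for rbacv1.Subject.
type subject struct {
	Kind, APIGroup, Name, Namespace string
}

// dedupe keeps the first occurrence of each subject, mirroring the
// linear-scan pattern AllowedSubjects uses before returning its result.
func dedupe(subjects []subject) []subject {
	deduped := []subject{}
	for _, s := range subjects {
		found := false
		for _, curr := range deduped {
			if curr == s {
				found = true
				break
			}
		}
		if !found {
			deduped = append(deduped, s)
		}
	}
	return deduped
}

func main() {
	in := []subject{
		{Kind: "Group", Name: "system:masters"},
		{Kind: "User", Name: "admin"},
		{Kind: "User", Name: "admin"}, // duplicate contributed by a second binding
	}
	fmt.Println(len(dedupe(in))) // 2
}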
88
vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/subject_locator_test.go
generated
vendored
@ -20,24 +20,24 @@ import (
"reflect"
"testing"

rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/apis/rbac"
rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)

func TestSubjectLocator(t *testing.T) {
type actionToSubjects struct {
action authorizer.Attributes
subjects []rbac.Subject
subjects []rbacv1.Subject
}

tests := []struct {
name string
roles []*rbac.Role
roleBindings []*rbac.RoleBinding
clusterRoles []*rbac.ClusterRole
clusterRoleBindings []*rbac.ClusterRoleBinding
roles []*rbacv1.Role
roleBindings []*rbacv1.RoleBinding
clusterRoles []*rbacv1.ClusterRole
clusterRoleBindings []*rbacv1.ClusterRoleBinding

superUser string

@ -45,42 +45,42 @@ func TestSubjectLocator(t *testing.T) {
}{
{
name: "no super user, star matches star",
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "*", "*")),
},
clusterRoleBindings: []*rbac.ClusterRoleBinding{
clusterRoleBindings: []*rbacv1.ClusterRoleBinding{
newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToClusterRole, "User:admin", "Group:admins"),
},
actionsToSubjects: []actionToSubjects{
{
&defaultAttributes{"", "", "get", "Pods", "", "ns1", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"},
},
},
{
// cluster role matches star in namespace
&defaultAttributes{"", "", "*", "Pods", "", "*", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
{
// empty ns
&defaultAttributes{"", "", "*", "Pods", "", "", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
},
@ -88,48 +88,48 @@ func TestSubjectLocator(t *testing.T) {
{
name: "super user, local roles work",
superUser: "foo",
clusterRoles: []*rbac.ClusterRole{
clusterRoles: []*rbacv1.ClusterRole{
newClusterRole("admin", newRule("*", "*", "*", "*")),
},
clusterRoleBindings: []*rbac.ClusterRoleBinding{
clusterRoleBindings: []*rbacv1.ClusterRoleBinding{
newClusterRoleBinding("admin", "User:super-admin", "Group:super-admins"),
},
roles: []*rbac.Role{
roles: []*rbacv1.Role{
newRole("admin", "ns1", newRule("get", "*", "Pods", "*")),
},
roleBindings: []*rbac.RoleBinding{
roleBindings: []*rbacv1.RoleBinding{
newRoleBinding("ns1", "admin", bindToRole, "User:admin", "Group:admins"),
},
actionsToSubjects: []actionToSubjects{
{
&defaultAttributes{"", "", "get", "Pods", "", "ns1", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "admins"},
},
},
{
// verb matchies correctly
&defaultAttributes{"", "", "create", "Pods", "", "ns1", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
{
// binding only works in correct ns
&defaultAttributes{"", "", "get", "Pods", "", "ns2", ""},
[]rbac.Subject{
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "foo"},
{Kind: rbac.UserKind, APIGroup: rbac.GroupName, Name: "super-admin"},
{Kind: rbac.GroupKind, APIGroup: rbac.GroupName, Name: "super-admins"},
[]rbacv1.Subject{
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: user.SystemPrivilegedGroup},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "foo"},
{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: "super-admin"},
{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: "super-admins"},
},
},
},
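The assertion side of this test lies outside the hunks shown, but since the file imports reflect, the expected subject lists above are presumably compared against the evaluator's output with an order-sensitive DeepEqual-style check along these lines (illustrative sketch only, with a simplified stand-in for rbacv1.Subject):

package main

import (
	"fmt"
	"reflect"
)

// subject is a simplified stand-in for rbacv1.Subject.
type subject struct {
	Kind, APIGroup, Name string
}

func main() {
	expected := []subject{
		{Kind: "Group", APIGroup: "rbac.authorization.k8s.io", Name: "system:masters"},
		{Kind: "User", APIGroup: "rbac.authorization.k8s.io", Name: "super-admin"},
	}
	actual := []subject{
		{Kind: "Group", APIGroup: "rbac.authorization.k8s.io", Name: "system:masters"},
		{Kind: "User", APIGroup: "rbac.authorization.k8s.io", Name: "super-admin"},
	}

	// reflect.DeepEqual on slices is order-sensitive, so expectations have to
	// list subjects in the order the evaluator appends them.
	if !reflect.DeepEqual(expected, actual) {
		fmt.Println("mismatch:", expected, actual)
		return
	}
	fmt.Println("subjects match")
}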