rebase: update kubernetes to latest

Update the Kubernetes release to the latest
version in the main go.mod.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author:    Madhu Rajanna
Date:      2024-08-19 10:01:33 +02:00
Committed: mergify[bot]
Parent:    63c4c05b35
Commit:    5a66991bb3

2173 changed files with 98906 additions and 61334 deletions


@ -163,11 +163,12 @@ type variableDeclEnvs map[OptionalVariableDeclarations]*environment.EnvSet
// CompileCELExpression returns a compiled CEL expression.
// perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input.
func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, envType environment.Type) CompilationResult {
resultError := func(errorString string, errType apiservercel.ErrorType) CompilationResult {
resultError := func(errorString string, errType apiservercel.ErrorType, cause error) CompilationResult {
return CompilationResult{
Error: &apiservercel.Error{
Type: errType,
Detail: errorString,
Cause: cause,
},
ExpressionAccessor: expressionAccessor,
}
@ -175,12 +176,12 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op
env, err := c.varEnvs[options].Env(envType)
if err != nil {
return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal)
return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal, nil)
}
ast, issues := env.Compile(expressionAccessor.GetExpression())
if issues != nil {
return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid)
return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid, apiservercel.NewCompilationError(issues))
}
found := false
returnTypes := expressionAccessor.ReturnTypes()
@ -198,19 +199,19 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op
reason = fmt.Sprintf("must evaluate to one of %v", returnTypes)
}
return resultError(reason, apiservercel.ErrorTypeInvalid)
return resultError(reason, apiservercel.ErrorTypeInvalid, nil)
}
_, err = cel.AstToCheckedExpr(ast)
if err != nil {
// should be impossible since env.Compile returned no issues
return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal)
return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal, nil)
}
prog, err := env.Program(ast,
cel.InterruptCheckFrequency(celconfig.CheckFrequency),
)
if err != nil {
return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal)
return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal, nil)
}
return CompilationResult{
Program: prog,
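
For context on the new cause argument: compilation failures now carry a machine-readable sentinel alongside the human-readable detail, so callers can branch on errors instead of parsing message prefixes. A minimal, self-contained sketch (the Detail text is illustrative; the type and sentinels are the ones referenced in this hunk):

package main

import (
	"errors"
	"fmt"

	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	// An apiservercel.Error carrying both a human-readable Detail and a
	// machine-readable Cause (values here are illustrative).
	celErr := &apiservercel.Error{
		Type:   apiservercel.ErrorTypeInvalid,
		Detail: "compilation failed: ERROR: <input>:1:1: undeclared reference",
		Cause:  apiservercel.ErrCompilation,
	}

	// Branch on the sentinel rather than on the Detail prefix.
	fmt.Println(errors.Is(celErr.Cause, apiservercel.ErrCompilation)) // true
}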


@ -192,6 +192,7 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione
evaluation.Error = &cel.Error{
Type: cel.ErrorTypeInvalid,
Detail: fmt.Sprintf("compilation error: %v", compilationResult.Error),
Cause: compilationResult.Error,
}
continue
}
@ -211,6 +212,7 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione
return nil, -1, &cel.Error{
Type: cel.ErrorTypeInvalid,
Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"),
Cause: cel.ErrOutOfBudget,
}
}
remainingBudget -= compositionCost
@ -228,12 +230,14 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione
return nil, -1, &cel.Error{
Type: cel.ErrorTypeInvalid,
Detail: fmt.Sprintf("runtime cost could not be calculated for expression: %v, no further expression will be run", compilationResult.ExpressionAccessor.GetExpression()),
Cause: cel.ErrOutOfBudget,
}
} else {
if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget {
return nil, -1, &cel.Error{
Type: cel.ErrorTypeInvalid,
Detail: fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run"),
Cause: cel.ErrOutOfBudget,
}
}
remainingBudget -= int64(*rtCost)


@ -40,7 +40,7 @@ var _ Controller[runtime.Object] = &controller[runtime.Object]{}
type controller[T runtime.Object] struct {
informer Informer[T]
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// Returns an error if there was a transient error during reconciliation
// and the object should be tried again later.
@ -99,7 +99,10 @@ func (c *controller[T]) Run(ctx context.Context) error {
klog.Infof("starting %s", c.options.Name)
defer klog.Infof("stopping %s", c.options.Name)
c.queue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), c.options.Name)
c.queue = workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: c.options.Name},
)
// Forcefully shutdown workqueue. Drop any enqueued items.
// Important to do this in a `defer` at the start of `Run`.
@ -219,7 +222,7 @@ func (c *controller[T]) runWorker() {
}
// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
err := func(obj string) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
@ -227,19 +230,6 @@ func (c *controller[T]) runWorker() {
// put back on the workqueue and attempted again after a back-off
// period.
defer c.queue.Done(obj)
var key string
var ok bool
// We expect strings to come off the workqueue. These are of the
// form namespace/name. We do this as the delayed nature of the
// workqueue means the items in the informer cache may actually be
// more up to date that when the item was initially put onto the
// workqueue.
if key, ok = obj.(string); !ok {
// How did an incorrectly formatted key get in the workqueue?
// Done is sufficient. (Forget resets rate limiter for the key,
// but the key is invalid so there is no point in doing that)
return fmt.Errorf("expected string in workqueue but got %#v", obj)
}
defer c.hasProcessed.Finished(key)
if err := c.reconcile(key); err != nil {
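
The hunks above migrate this controller from the untyped workqueue to a string-typed one, which is why the worker loop no longer needs the obj.(string) assertion. A minimal, self-contained sketch of the typed pattern (queue name and key are illustrative; assumes the client-go version vendored by this commit):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A rate-limited queue keyed by string: items come off Get() already typed,
	// so no interface{} assertion is needed before using them.
	q := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example-controller"},
	)
	defer q.ShutDown()

	q.Add("default/my-object")

	key, shutdown := q.Get() // key is a string
	if shutdown {
		return
	}
	defer q.Done(key)

	fmt.Println("processing", key)
	q.Forget(key) // reset the rate limiter for this key after a successful sync
}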


@ -22,6 +22,8 @@ import (
"sort"
"strings"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
@ -58,6 +60,8 @@ var _ authorizer.Attributes = (interface {
GetAPIVersion() string
IsResourceRequest() bool
GetPath() string
GetFieldSelector() (fields.Requirements, error)
GetLabelSelector() (labels.Requirements, error)
})(nil)
// The user info accessors known to cache key construction. If this fails to compile, the cache
@ -72,16 +76,31 @@ var _ user.Info = (interface {
// Authorize returns an authorization decision by delegating to another Authorizer. If an equivalent
// check has already been performed, a cached result is returned. Not safe for concurrent use.
func (ca *cachingAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
serializableAttributes := authorizer.AttributesRecord{
Verb: a.GetVerb(),
Namespace: a.GetNamespace(),
APIGroup: a.GetAPIGroup(),
APIVersion: a.GetAPIVersion(),
Resource: a.GetResource(),
Subresource: a.GetSubresource(),
Name: a.GetName(),
ResourceRequest: a.IsResourceRequest(),
Path: a.GetPath(),
type SerializableAttributes struct {
authorizer.AttributesRecord
LabelSelector string
}
serializableAttributes := SerializableAttributes{
AttributesRecord: authorizer.AttributesRecord{
Verb: a.GetVerb(),
Namespace: a.GetNamespace(),
APIGroup: a.GetAPIGroup(),
APIVersion: a.GetAPIVersion(),
Resource: a.GetResource(),
Subresource: a.GetSubresource(),
Name: a.GetName(),
ResourceRequest: a.IsResourceRequest(),
Path: a.GetPath(),
},
}
// in the error case, we won't honor this field selector, so the cache doesn't need it.
if fieldSelector, err := a.GetFieldSelector(); len(fieldSelector) > 0 {
serializableAttributes.FieldSelectorRequirements, serializableAttributes.FieldSelectorParsingErr = fieldSelector, err
}
if labelSelector, _ := a.GetLabelSelector(); len(labelSelector) > 0 {
// the labels requirements have private elements so those don't help us serialize to a unique key
serializableAttributes.LabelSelector = labelSelector.String()
}
if u := a.GetUser(); u != nil {


@ -223,7 +223,7 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm
switch decision.Action {
case ActionAdmit:
if decision.Evaluation == EvalError {
celmetrics.Metrics.ObserveAdmissionWithError(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
celmetrics.Metrics.ObserveAdmission(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision))
}
case ActionDeny:
for _, action := range binding.Spec.ValidationActions {
@ -234,13 +234,13 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm
Binding: binding,
PolicyDecision: decision,
})
celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision))
case admissionregistrationv1.Audit:
publishValidationFailureAnnotation(binding, i, decision, versionedAttr)
celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision))
case admissionregistrationv1.Warn:
warning.AddWarning(ctx, "", fmt.Sprintf("Validation failed for ValidatingAdmissionPolicy '%s' with binding '%s': %s", definition.Name, binding.Name, decision.Message))
celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, ErrorType(&decision))
}
}
default:
@ -259,7 +259,7 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm
auditAnnotationCollector.add(auditAnnotation.Key, value)
case AuditAnnotationActionError:
// When failurePolicy=fail, audit annotation errors result in deny
deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{
d := policyDecisionWithMetadata{
Definition: definition,
Binding: binding,
PolicyDecision: PolicyDecision{
@ -268,8 +268,9 @@ func (c *dispatcher) Dispatch(ctx context.Context, a admission.Attributes, o adm
Message: auditAnnotation.Error,
Elapsed: auditAnnotation.Elapsed,
},
})
celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, "active")
}
deniedDecisions = append(deniedDecisions, d)
celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, ErrorType(&d.PolicyDecision))
case AuditAnnotationActionExclude: // skip it
default:
return fmt.Errorf("unsupported AuditAnnotation Action: %s", auditAnnotation.Action)


@ -0,0 +1,38 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validating
import (
"strings"
celmetrics "k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics"
)
// ErrorType decodes the error to determine the error type
// that the metrics understand.
func ErrorType(decision *PolicyDecision) celmetrics.ValidationErrorType {
if decision.Evaluation == EvalAdmit {
return celmetrics.ValidationNoError
}
if strings.HasPrefix(decision.Message, "compilation") {
return celmetrics.ValidationCompileError
}
if strings.HasPrefix(decision.Message, "validation failed due to running out of cost budget") {
return celmetrics.ValidatingOutOfBudget
}
return celmetrics.ValidatingInvalidError
}
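
A quick illustration of how this helper maps decision messages onto the new error_type metric label (the decision values are hand-built for the example):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/admission/plugin/policy/validating"
)

func main() {
	// A failed evaluation whose message starts with "compilation".
	d := validating.PolicyDecision{
		Evaluation: validating.EvalError,
		Message:    "compilation error: ...",
	}
	fmt.Println(validating.ErrorType(&d)) // compile_error

	// Budget exhaustion maps to the out-of-budget bucket.
	d.Message = "validation failed due to running out of cost budget, no further validation rules will be run"
	fmt.Println(validating.ErrorType(&d)) // out_of_budget
}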


@ -0,0 +1,38 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cel
import (
"errors"
apiservercel "k8s.io/apiserver/pkg/cel"
)
// ErrorType decodes the error to determine the error type
// that the metrics understand.
func ErrorType(err error) ValidationErrorType {
if err == nil {
return ValidationNoError
}
if errors.Is(err, apiservercel.ErrCompilation) {
return ValidationCompileError
}
if errors.Is(err, apiservercel.ErrOutOfBudget) {
return ValidatingOutOfBudget
}
return ValidatingInvalidError
}


@ -29,6 +29,22 @@ const (
metricsSubsystem = "validating_admission_policy"
)
// ValidationErrorType defines different error types that happen to a validation expression
type ValidationErrorType string
const (
// ValidationCompileError indicates that the expression fails to compile.
ValidationCompileError ValidationErrorType = "compile_error"
// ValidatingInvalidError indicates that the expression fails due to internal
// errors that are out of the control of the user.
ValidatingInvalidError ValidationErrorType = "invalid_error"
// ValidatingOutOfBudget indicates that the expression fails due to running
// out of cost budget, or the budget cannot be obtained.
ValidatingOutOfBudget ValidationErrorType = "out_of_budget"
// ValidationNoError indicates that the expression returns without an error.
ValidationNoError ValidationErrorType = "no_error"
)
var (
// Metrics provides access to validation admission metrics.
Metrics = newValidationAdmissionMetrics()
@ -36,9 +52,8 @@ var (
// ValidatingAdmissionPolicyMetrics aggregates Prometheus metrics related to validation admission control.
type ValidatingAdmissionPolicyMetrics struct {
policyCheck *metrics.CounterVec
policyDefinition *metrics.CounterVec
policyLatency *metrics.HistogramVec
policyCheck *metrics.CounterVec
policyLatency *metrics.HistogramVec
}
func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics {
@ -47,25 +62,16 @@ func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics {
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "check_total",
Help: "Validation admission policy check total, labeled by policy and further identified by binding, enforcement action taken, and state.",
StabilityLevel: metrics.ALPHA,
Help: "Validation admission policy check total, labeled by policy and further identified by binding and enforcement action taken.",
StabilityLevel: metrics.BETA,
},
[]string{"policy", "policy_binding", "enforcement_action", "state"},
)
definition := metrics.NewCounterVec(&metrics.CounterOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "definition_total",
Help: "Validation admission policy count total, labeled by state and enforcement action.",
StabilityLevel: metrics.ALPHA,
},
[]string{"state", "enforcement_action"},
[]string{"policy", "policy_binding", "error_type", "enforcement_action"},
)
latency := metrics.NewHistogramVec(&metrics.HistogramOpts{
Namespace: metricsNamespace,
Subsystem: metricsSubsystem,
Name: "check_duration_seconds",
Help: "Validation admission latency for individual validation expressions in seconds, labeled by policy and further including binding, state and enforcement action taken.",
Help: "Validation admission latency for individual validation expressions in seconds, labeled by policy and further including binding and enforcement action taken.",
// the bucket distribution here is based on the benchmark suite at
// github.com/DangerOnTheRanger/cel-benchmark performed on 16-core Intel Xeon
// the lowest bucket was based around the 180ns/op figure for BenchmarkAccess,
@ -75,49 +81,42 @@ func newValidationAdmissionMetrics() *ValidatingAdmissionPolicyMetrics {
// around 760ms, so that bucket should only ever have the slowest CEL expressions
// in it
Buckets: []float64{0.0000005, 0.001, 0.01, 0.1, 1.0},
StabilityLevel: metrics.ALPHA,
StabilityLevel: metrics.BETA,
},
[]string{"policy", "policy_binding", "enforcement_action", "state"},
[]string{"policy", "policy_binding", "error_type", "enforcement_action"},
)
legacyregistry.MustRegister(check)
legacyregistry.MustRegister(definition)
legacyregistry.MustRegister(latency)
return &ValidatingAdmissionPolicyMetrics{policyCheck: check, policyDefinition: definition, policyLatency: latency}
return &ValidatingAdmissionPolicyMetrics{policyCheck: check, policyLatency: latency}
}
// Reset resets all validation admission-related Prometheus metrics.
func (m *ValidatingAdmissionPolicyMetrics) Reset() {
m.policyCheck.Reset()
m.policyDefinition.Reset()
m.policyLatency.Reset()
}
// ObserveDefinition observes a policy definition.
func (m *ValidatingAdmissionPolicyMetrics) ObserveDefinition(ctx context.Context, state, enforcementAction string) {
m.policyDefinition.WithContext(ctx).WithLabelValues(state, enforcementAction).Inc()
}
// ObserveAdmissionWithError observes a policy validation error that was ignored due to failure policy.
func (m *ValidatingAdmissionPolicyMetrics) ObserveAdmissionWithError(ctx context.Context, elapsed time.Duration, policy, binding, state string) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "allow", state).Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "allow", state).Observe(elapsed.Seconds())
// ObserveAdmission observes a policy validation, with an optional error type indicating an error that occurred but was ignored.
func (m *ValidatingAdmissionPolicyMetrics) ObserveAdmission(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "allow").Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "allow").Observe(elapsed.Seconds())
}
// ObserveRejection observes a policy validation error that was at least one of the reasons for a deny.
func (m *ValidatingAdmissionPolicyMetrics) ObserveRejection(ctx context.Context, elapsed time.Duration, policy, binding, state string) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "deny", state).Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "deny", state).Observe(elapsed.Seconds())
func (m *ValidatingAdmissionPolicyMetrics) ObserveRejection(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "deny").Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "deny").Observe(elapsed.Seconds())
}
// ObserveAudit observes a policy validation audit annotation was published for a validation failure.
func (m *ValidatingAdmissionPolicyMetrics) ObserveAudit(ctx context.Context, elapsed time.Duration, policy, binding, state string) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "audit", state).Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "audit", state).Observe(elapsed.Seconds())
func (m *ValidatingAdmissionPolicyMetrics) ObserveAudit(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "audit").Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "audit").Observe(elapsed.Seconds())
}
// ObserveWarn observes a policy validation warning was published for a validation failure.
func (m *ValidatingAdmissionPolicyMetrics) ObserveWarn(ctx context.Context, elapsed time.Duration, policy, binding, state string) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, "warn", state).Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, "warn", state).Observe(elapsed.Seconds())
func (m *ValidatingAdmissionPolicyMetrics) ObserveWarn(ctx context.Context, elapsed time.Duration, policy, binding string, errorType ValidationErrorType) {
m.policyCheck.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "warn").Inc()
m.policyLatency.WithContext(ctx).WithLabelValues(policy, binding, string(errorType), "warn").Observe(elapsed.Seconds())
}
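
With the definition_total counter removed and the state label replaced by error_type, a single check observation now looks like this (policy and binding names are illustrative; the import alias matches the one used elsewhere in this commit):

package main

import (
	"context"
	"time"

	celmetrics "k8s.io/apiserver/pkg/admission/plugin/policy/validating/metrics"
)

func main() {
	// Records one denied check for a compile error; the labels land as
	// policy, policy_binding, error_type, enforcement_action.
	celmetrics.Metrics.ObserveRejection(
		context.Background(),
		250*time.Microsecond,
		"pods-required-labels",
		"pods-required-labels-binding",
		celmetrics.ValidationCompileError,
	)
}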


@ -19,6 +19,7 @@ package validating
import (
"context"
"io"
"sync"
v1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/meta"
@ -44,24 +45,35 @@ const (
)
var (
compositionEnvTemplateWithStrictCost *cel.CompositionEnv = func() *cel.CompositionEnv {
compositionEnvTemplateWithStrictCost, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true))
if err != nil {
panic(err)
}
lazyCompositionEnvTemplateWithStrictCostInit sync.Once
lazyCompositionEnvTemplateWithStrictCost *cel.CompositionEnv
return compositionEnvTemplateWithStrictCost
}()
compositionEnvTemplateWithoutStrictCost *cel.CompositionEnv = func() *cel.CompositionEnv {
compositionEnvTemplateWithoutStrictCost, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), false))
if err != nil {
panic(err)
}
return compositionEnvTemplateWithoutStrictCost
}()
lazyCompositionEnvTemplateWithoutStrictCostInit sync.Once
lazyCompositionEnvTemplateWithoutStrictCost *cel.CompositionEnv
)
func getCompositionEnvTemplateWithStrictCost() *cel.CompositionEnv {
lazyCompositionEnvTemplateWithStrictCostInit.Do(func() {
env, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true))
if err != nil {
panic(err)
}
lazyCompositionEnvTemplateWithStrictCost = env
})
return lazyCompositionEnvTemplateWithStrictCost
}
func getCompositionEnvTemplateWithoutStrictCost() *cel.CompositionEnv {
lazyCompositionEnvTemplateWithoutStrictCostInit.Do(func() {
env, err := cel.NewCompositionEnv(cel.VariablesTypeName, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), false))
if err != nil {
panic(err)
}
lazyCompositionEnvTemplateWithoutStrictCost = env
})
return lazyCompositionEnvTemplateWithoutStrictCost
}
// Register registers a plugin
func Register(plugins *admission.Plugins) {
plugins.Register(PluginName, func(configFile io.Reader) (admission.Interface, error) {
@ -131,9 +143,9 @@ func compilePolicy(policy *Policy) Validator {
matchConditions := policy.Spec.MatchConditions
var compositionEnvTemplate *cel.CompositionEnv
if strictCost {
compositionEnvTemplate = compositionEnvTemplateWithStrictCost
compositionEnvTemplate = getCompositionEnvTemplateWithStrictCost()
} else {
compositionEnvTemplate = compositionEnvTemplateWithoutStrictCost
compositionEnvTemplate = getCompositionEnvTemplateWithoutStrictCost()
}
filterCompiler := cel.NewCompositedCompilerFromTemplate(compositionEnvTemplate)
filterCompiler.CompileAndStoreVariables(convertv1beta1Variables(policy.Spec.Variables), optionalVars, environment.StoredExpressions)


@ -18,6 +18,7 @@ package validating
import (
"context"
"errors"
"fmt"
"strings"
@ -132,19 +133,14 @@ func (v *validator) Validate(ctx context.Context, matchedResource schema.GroupVe
}
var messageResult *cel.EvaluationResult
var messageError *apiservercel.Error
if len(messageResults) > i {
messageResult = &messageResults[i]
}
messageError, _ = err.(*apiservercel.Error)
if evalResult.Error != nil {
decision.Action = policyDecisionActionForError(f)
decision.Evaluation = EvalError
decision.Message = evalResult.Error.Error()
} else if messageError != nil &&
(messageError.Type == apiservercel.ErrorTypeInternal ||
(messageError.Type == apiservercel.ErrorTypeInvalid &&
strings.HasPrefix(messageError.Detail, "validation failed due to running out of cost budget"))) {
} else if errors.Is(err, apiservercel.ErrInternal) || errors.Is(err, apiservercel.ErrOutOfBudget) {
decision.Action = policyDecisionActionForError(f)
decision.Evaluation = EvalError
decision.Message = fmt.Sprintf("failed messageExpression: %s", err)


@ -24,8 +24,8 @@ import (
"fmt"
"time"
jsonpatch "github.com/evanphx/json-patch"
"go.opentelemetry.io/otel/attribute"
jsonpatch "gopkg.in/evanphx/json-patch.v4"
admissionv1 "k8s.io/api/admission/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"


@ -165,6 +165,25 @@ type AuthenticationConfiguration struct {
metav1.TypeMeta
JWT []JWTAuthenticator
// If present --anonymous-auth must not be set
Anonymous *AnonymousAuthConfig
}
// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
type AnonymousAuthConfig struct {
Enabled bool
// If set, anonymous auth is only allowed if the request meets one of the
// conditions.
Conditions []AnonymousAuthCondition
}
// AnonymousAuthCondition describes the condition under which anonymous auth
// should be enabled.
type AnonymousAuthCondition struct {
// Path for which anonymous auth is enabled.
Path string
}
// JWTAuthenticator provides the configuration for a single JWT authenticator.
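
The new internal types compose into a structured authentication configuration like this; a small sketch with illustrative endpoints (not defaults):

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
	// Anonymous auth enabled, but only for a handful of health endpoints.
	cfg := apiserver.AuthenticationConfiguration{
		Anonymous: &apiserver.AnonymousAuthConfig{
			Enabled: true,
			Conditions: []apiserver.AnonymousAuthCondition{
				{Path: "/healthz"},
				{Path: "/readyz"},
				{Path: "/livez"},
			},
		},
	}
	fmt.Println(cfg.Anonymous.Enabled, len(cfg.Anonymous.Conditions)) // true 3
}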


@ -185,6 +185,25 @@ type AuthenticationConfiguration struct {
// "<username claim>": "username"
// }
JWT []JWTAuthenticator `json:"jwt"`
// If present --anonymous-auth must not be set
Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
}
// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
type AnonymousAuthConfig struct {
Enabled bool `json:"enabled"`
// If set, anonymous auth is only allowed if the request meets one of the
// conditions.
Conditions []AnonymousAuthCondition `json:"conditions,omitempty"`
}
// AnonymousAuthCondition describes the condition under which anonymous auth
// should be enabled.
type AnonymousAuthCondition struct {
// Path for which anonymous auth is enabled.
Path string `json:"path"`
}
// JWTAuthenticator provides the configuration for a single JWT authenticator.


@ -57,6 +57,26 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope)
}); err != nil {
@ -324,6 +344,48 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginC
return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s)
}
func autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function.
func Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
return autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s)
}
func autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition is an autogenerated conversion function.
func Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
return autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in, out, s)
}
func autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function.
func Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
return autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s)
}
func autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig is an autogenerated conversion function.
func Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
return autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in, out, s)
}
func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
if in.JWT != nil {
in, out := &in.JWT, &out.JWT
@ -336,6 +398,7 @@ func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_Authenticatio
} else {
out.JWT = nil
}
out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
return nil
}
@ -356,6 +419,7 @@ func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_Authenticatio
} else {
out.JWT = nil
}
out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
return nil
}


@ -78,6 +78,43 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
if in == nil {
return nil
}
out := new(AnonymousAuthCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]AnonymousAuthCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
if in == nil {
return nil
}
out := new(AnonymousAuthConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
*out = *in
@ -89,6 +126,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Anonymous != nil {
in, out := &in.Anonymous, &out.Anonymous
*out = new(AnonymousAuthConfig)
(*in).DeepCopyInto(*out)
}
return
}


@ -156,6 +156,25 @@ type AuthenticationConfiguration struct {
// "<username claim>": "username"
// }
JWT []JWTAuthenticator `json:"jwt"`
// If present --anonymous-auth must not be set
Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
}
// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
type AnonymousAuthConfig struct {
Enabled bool `json:"enabled"`
// If set, anonymous auth is only allowed if the request meets one of the
// conditions.
Conditions []AnonymousAuthCondition `json:"conditions,omitempty"`
}
// AnonymousAuthCondition describes the condition under which anonymous auth
// should be enabled.
type AnonymousAuthCondition struct {
// Path for which anonymous auth is enabled.
Path string `json:"path"`
}
// JWTAuthenticator provides the configuration for a single JWT authenticator.


@ -37,6 +37,26 @@ func init() {
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope)
}); err != nil {
@ -260,6 +280,48 @@ func RegisterConversions(s *runtime.Scheme) error {
return nil
}
func autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function.
func Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
return autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s)
}
func autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
out.Path = in.Path
return nil
}
// Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition is an autogenerated conversion function.
func Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
return autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in, out, s)
}
func autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function.
func Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
return autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s)
}
func autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
return nil
}
// Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig is an autogenerated conversion function.
func Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
return autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in, out, s)
}
func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
if in.JWT != nil {
in, out := &in.JWT, &out.JWT
@ -272,6 +334,7 @@ func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_Authentication
} else {
out.JWT = nil
}
out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
return nil
}
@ -292,6 +355,7 @@ func autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_Authentication
} else {
out.JWT = nil
}
out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
return nil
}


@ -25,6 +25,43 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
if in == nil {
return nil
}
out := new(AnonymousAuthCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]AnonymousAuthCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
if in == nil {
return nil
}
out := new(AnonymousAuthConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
*out = *in
@ -36,6 +73,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Anonymous != nil {
in, out := &in.Anonymous, &out.Anonymous
*out = new(AnonymousAuthConfig)
(*in).DeepCopyInto(*out)
}
return
}


@ -78,6 +78,15 @@ func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration, dis
}
}
if c.Anonymous != nil {
if !utilfeature.DefaultFeatureGate.Enabled(features.AnonymousAuthConfigurableEndpoints) {
allErrs = append(allErrs, field.Forbidden(field.NewPath("anonymous"), "anonymous is not supported when AnonymousAuthConfigurableEndpoints feature gate is disabled"))
}
if !c.Anonymous.Enabled && len(c.Anonymous.Conditions) > 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("anonymous", "conditions"), c.Anonymous.Conditions, "enabled should be set to true when conditions are defined"))
}
}
return allErrs
}
@ -727,6 +736,7 @@ func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath
compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true))
seenExpressions := sets.NewString()
var compilationResults []authorizationcel.CompilationResult
var usesFieldSelector, usesLabelSelector bool
for i, condition := range matchConditions {
fldPath := fldPath.Child("matchConditions").Index(i).Child("expression")
@ -745,12 +755,16 @@ func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath
continue
}
compilationResults = append(compilationResults, compilationResult)
usesFieldSelector = usesFieldSelector || compilationResult.UsesFieldSelector
usesLabelSelector = usesLabelSelector || compilationResult.UsesLabelSelector
}
if len(compilationResults) == 0 {
return nil, allErrs
}
return &authorizationcel.CELMatcher{
CompilationResults: compilationResults,
UsesFieldSelector: usesFieldSelector,
UsesLabelSelector: usesLabelSelector,
}, allErrs
}


@ -100,6 +100,43 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
if in == nil {
return nil
}
out := new(AnonymousAuthCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]AnonymousAuthCondition, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
if in == nil {
return nil
}
out := new(AnonymousAuthConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
*out = *in
@ -111,6 +148,11 @@ func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfigura
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Anonymous != nil {
in, out := &in.Anonymous, &out.Anonymous
*out = new(AnonymousAuthConfig)
(*in).DeepCopyInto(*out)
}
return
}


@ -48,11 +48,11 @@ message Event {
optional string verb = 5;
// Authenticated user information.
optional k8s.io.api.authentication.v1.UserInfo user = 6;
optional .k8s.io.api.authentication.v1.UserInfo user = 6;
// Impersonated user information.
// +optional
optional k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7;
optional .k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7;
// Source IPs, from where the request originated and intermediate proxies.
// The source IPs are listed from (in order):
@ -79,28 +79,28 @@ message Event {
// For successful responses, this will only include the Code and StatusSuccess.
// For non-status type error responses, this will be auto-populated with the error Message.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10;
// API object from the request, in JSON format. The RequestObject is recorded as-is in the request
// (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or
// merging. It is an external versioned object type, and may not be a valid object on its own.
// Omitted for non-resource requests. Only logged at Request Level and higher.
// +optional
optional k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11;
optional .k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11;
// API object returned in the response, in JSON. The ResponseObject is recorded after conversion
// to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged
// at Response Level.
// +optional
optional k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12;
optional .k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12;
// Time the request reached the apiserver.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13;
// Time the request reached current audit stage.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14;
// Annotations is an unstructured key value map stored with an audit event that may be set by
// plugins invoked in the request serving chain, including authentication, authorization and
@ -115,7 +115,7 @@ message Event {
// EventList is a list of audit Events.
message EventList {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
repeated Event items = 2;
}
@ -187,7 +187,7 @@ message ObjectReference {
message Policy {
// ObjectMeta is included for interoperability with API infrastructure.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Rules specify the audit Level a request should be recorded at.
// A request may match multiple rules, in which case the FIRST matching rule is used.
@ -215,7 +215,7 @@ message Policy {
// PolicyList is a list of audit Policies.
message PolicyList {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
repeated Policy items = 2;
}


@ -21,6 +21,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/apis/apiserver"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/group"
"k8s.io/apiserver/pkg/authentication/request/anonymous"
@ -39,7 +40,7 @@ import (
// DelegatingAuthenticatorConfig is the minimal configuration needed to create an authenticator
// built to delegate authentication to a kube API server
type DelegatingAuthenticatorConfig struct {
Anonymous bool
Anonymous *apiserver.AnonymousAuthConfig
// TokenAccessReviewClient is a client to do token review. It can be nil. Then every token is ignored.
TokenAccessReviewClient authenticationclient.AuthenticationV1Interface
@ -112,15 +113,15 @@ func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.Secur
}
if len(authenticators) == 0 {
if c.Anonymous {
return anonymous.NewAuthenticator(), &securityDefinitions, nil
if c.Anonymous != nil && c.Anonymous.Enabled {
return anonymous.NewAuthenticator(c.Anonymous.Conditions), &securityDefinitions, nil
}
return nil, nil, errors.New("No authentication method configured")
return nil, nil, errors.New("no authentication method configured")
}
authenticator := group.NewAuthenticatedGroupAdder(unionauth.New(authenticators...))
if c.Anonymous {
authenticator = unionauth.NewFailOnError(authenticator, anonymous.NewAuthenticator())
if c.Anonymous != nil && c.Anonymous.Enabled {
authenticator = unionauth.NewFailOnError(authenticator, anonymous.NewAuthenticator(c.Anonymous.Conditions))
}
return authenticator, &securityDefinitions, nil
}


@ -19,25 +19,44 @@ package anonymous
import (
"net/http"
"k8s.io/apiserver/pkg/apis/apiserver"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/user"
)
const (
anonymousUser = user.Anonymous
anonymousUser = user.Anonymous
unauthenticatedGroup = user.AllUnauthenticated
)
func NewAuthenticator() authenticator.Request {
return authenticator.RequestFunc(func(req *http.Request) (*authenticator.Response, bool, error) {
auds, _ := authenticator.AudiencesFrom(req.Context())
return &authenticator.Response{
User: &user.DefaultInfo{
Name: anonymousUser,
Groups: []string{unauthenticatedGroup},
},
Audiences: auds,
}, true, nil
})
type Authenticator struct {
allowedPaths map[string]bool
}
func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
if len(a.allowedPaths) > 0 && !a.allowedPaths[req.URL.Path] {
return nil, false, nil
}
auds, _ := authenticator.AudiencesFrom(req.Context())
return &authenticator.Response{
User: &user.DefaultInfo{
Name: anonymousUser,
Groups: []string{unauthenticatedGroup},
},
Audiences: auds,
}, true, nil
}
// NewAuthenticator returns a new anonymous authenticator.
// When conditions is empty all requests are authenticated as anonymous.
// When conditions are non-empty, only those requests that match at least one
// condition are authenticated as anonymous.
func NewAuthenticator(conditions []apiserver.AnonymousAuthCondition) authenticator.Request {
allowedPaths := make(map[string]bool)
for _, c := range conditions {
allowedPaths[c.Path] = true
}
return &Authenticator{allowedPaths: allowedPaths}
}
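
A self-contained sketch of the conditions-based behavior added here: requests to a listed path are authenticated as the anonymous user, everything else is declined (the /healthz path is illustrative):

package main

import (
	"fmt"
	"net/http/httptest"

	"k8s.io/apiserver/pkg/apis/apiserver"
	"k8s.io/apiserver/pkg/authentication/request/anonymous"
)

func main() {
	auth := anonymous.NewAuthenticator([]apiserver.AnonymousAuthCondition{{Path: "/healthz"}})

	// Path listed in the conditions: authenticated as system:anonymous.
	resp, ok, err := auth.AuthenticateRequest(httptest.NewRequest("GET", "/healthz", nil))
	fmt.Println(ok, err, resp.User.GetName()) // true <nil> system:anonymous

	// Path not listed: the authenticator declines, leaving the request to
	// other authenticators in the chain.
	_, ok, _ = auth.AuthenticateRequest(httptest.NewRequest("GET", "/api/v1/pods", nil))
	fmt.Println(ok) // false
}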


@ -17,9 +17,7 @@ limitations under the License.
package headerrequest
import (
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
@ -27,7 +25,6 @@ import (
"k8s.io/apiserver/pkg/authentication/authenticator"
x509request "k8s.io/apiserver/pkg/authentication/request/x509"
"k8s.io/apiserver/pkg/authentication/user"
utilcert "k8s.io/client-go/util/cert"
)
// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places.
@ -106,48 +103,6 @@ func trimHeaders(headerNames ...string) ([]string, error) {
return ret, nil
}
func NewSecure(clientCA string, proxyClientNames []string, nameHeaders []string, groupHeaders []string, extraHeaderPrefixes []string) (authenticator.Request, error) {
if len(clientCA) == 0 {
return nil, fmt.Errorf("missing clientCA file")
}
// Wrap with an x509 verifier
caData, err := ioutil.ReadFile(clientCA)
if err != nil {
return nil, fmt.Errorf("error reading %s: %v", clientCA, err)
}
opts := x509request.DefaultVerifyOptions()
opts.Roots = x509.NewCertPool()
certs, err := utilcert.ParseCertsPEM(caData)
if err != nil {
return nil, fmt.Errorf("error loading certs from %s: %v", clientCA, err)
}
for _, cert := range certs {
opts.Roots.AddCert(cert)
}
trimmedNameHeaders, err := trimHeaders(nameHeaders...)
if err != nil {
return nil, err
}
trimmedGroupHeaders, err := trimHeaders(groupHeaders...)
if err != nil {
return nil, err
}
trimmedExtraHeaderPrefixes, err := trimHeaders(extraHeaderPrefixes...)
if err != nil {
return nil, err
}
return NewDynamicVerifyOptionsSecure(
x509request.StaticVerifierFn(opts),
StaticStringSlice(proxyClientNames),
StaticStringSlice(trimmedNameHeaders),
StaticStringSlice(trimmedGroupHeaders),
StaticStringSlice(trimmedExtraHeaderPrefixes),
), nil
}
func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request {
headerAuthenticator := NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes)


@ -20,6 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
"sync/atomic"
"time"
corev1 "k8s.io/api/core/v1"
@ -35,7 +36,6 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sync/atomic"
)
const (
@ -74,7 +74,7 @@ type RequestHeaderAuthRequestController struct {
configmapInformer cache.SharedIndexInformer
configmapInformerSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// exportedRequestHeaderBundle is a requestHeaderBundle that contains the last read, non-zero length content of the configmap
exportedRequestHeaderBundle atomic.Value
@ -104,7 +104,10 @@ func NewRequestHeaderAuthRequestController(
extraHeaderPrefixesKey: extraHeaderPrefixesKey,
allowedClientNamesKey: allowedClientNamesKey,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RequestHeaderAuthRequestController"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "RequestHeaderAuthRequestController"},
),
}
// we construct our own informer because we need such a small subset of the information available. Just one namespace.


@ -20,6 +20,8 @@ import (
"context"
"net/http"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apiserver/pkg/authentication/user"
)
@ -62,6 +64,16 @@ type Attributes interface {
// GetPath returns the path of the request
GetPath() string
// ParseFieldSelector is lazy, thread-safe, and stores the parsed result and error.
// It returns an error if the field selector cannot be parsed.
// The returned requirements must be treated as readonly and not modified.
GetFieldSelector() (fields.Requirements, error)
// ParseLabelSelector is lazy, thread-safe, and stores the parsed result and error.
// It returns an error if the label selector cannot be parsed.
// The returned requirements must be treated as readonly and not modified.
GetLabelSelector() (labels.Requirements, error)
}
// Authorizer makes an authorization decision based on information gained by making
@ -100,6 +112,11 @@ type AttributesRecord struct {
Name string
ResourceRequest bool
Path string
FieldSelectorRequirements fields.Requirements
FieldSelectorParsingErr error
LabelSelectorRequirements labels.Requirements
LabelSelectorParsingErr error
}
func (a AttributesRecord) GetUser() user.Info {
@ -146,6 +163,14 @@ func (a AttributesRecord) GetPath() string {
return a.Path
}
func (a AttributesRecord) GetFieldSelector() (fields.Requirements, error) {
return a.FieldSelectorRequirements, a.FieldSelectorParsingErr
}
func (a AttributesRecord) GetLabelSelector() (labels.Requirements, error) {
return a.LabelSelectorRequirements, a.LabelSelectorParsingErr
}
type Decision int
const (
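
Selector requirements are expected to be parsed once and carried on the attributes, with any parse error preserved next to them; a minimal sketch (the selector value is illustrative, and fields.ParseSelector is used here only as one way to produce requirements):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apiserver/pkg/authorization/authorizer"
)

func main() {
	var reqs fields.Requirements
	sel, err := fields.ParseSelector("spec.nodeName=node1")
	if err == nil {
		reqs = sel.Requirements()
	}

	attrs := authorizer.AttributesRecord{
		Verb:                      "list",
		Resource:                  "pods",
		ResourceRequest:           true,
		FieldSelectorRequirements: reqs,
		FieldSelectorParsingErr:   err,
	}

	// Authorizers read the pre-parsed requirements (and parse error) back out.
	got, gotErr := attrs.GetFieldSelector()
	fmt.Println(len(got), gotErr) // 1 <nil>
}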


@ -20,22 +20,34 @@ import (
"fmt"
"github.com/google/cel-go/cel"
celast "github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types/ref"
authorizationv1 "k8s.io/api/authorization/v1"
"k8s.io/apimachinery/pkg/util/version"
apiservercel "k8s.io/apiserver/pkg/cel"
"k8s.io/apiserver/pkg/cel/environment"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
)
const (
subjectAccessReviewRequestVarName = "request"
fieldSelectorVarName = "fieldSelector"
labelSelectorVarName = "labelSelector"
)
// CompilationResult represents a compiled authorization cel expression.
type CompilationResult struct {
Program cel.Program
ExpressionAccessor ExpressionAccessor
// These track if a given expression uses fieldSelector and labelSelector,
// so construction of data passed to the CEL expression can be optimized if those fields are unused.
UsesFieldSelector bool
UsesLabelSelector bool
}
// EvaluationResult contains the minimal required fields and metadata of a cel evaluation
@ -96,11 +108,50 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (C
return resultError(reason, apiservercel.ErrorTypeInvalid)
}
_, err = cel.AstToCheckedExpr(ast)
checkedExpr, err := cel.AstToCheckedExpr(ast)
if err != nil {
// should be impossible since env.Compile returned no issues
return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal)
}
celAST, err := celast.ToAST(checkedExpr)
if err != nil {
// should be impossible since env.Compile returned no issues
return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal)
}
var usesFieldSelector, usesLabelSelector bool
celast.PreOrderVisit(celast.NavigateAST(celAST), celast.NewExprVisitor(func(e celast.Expr) {
// we already know we use both, no need to inspect more
if usesFieldSelector && usesLabelSelector {
return
}
var fieldName string
switch e.Kind() {
case celast.SelectKind:
// simple select (.fieldSelector / .labelSelector)
fieldName = e.AsSelect().FieldName()
case celast.CallKind:
// optional select (.?fieldSelector / .?labelSelector)
if e.AsCall().FunctionName() != operators.OptSelect {
return
}
args := e.AsCall().Args()
// args[0] is the receiver (what comes before the `.?`), args[1] is the field name being optionally selected (what comes after the `.?`)
if len(args) != 2 || args[1].Kind() != celast.LiteralKind || args[1].AsLiteral().Type() != cel.StringType {
return
}
fieldName, _ = args[1].AsLiteral().Value().(string)
}
switch fieldName {
case fieldSelectorVarName:
usesFieldSelector = true
case labelSelectorVarName:
usesLabelSelector = true
}
}))
prog, err := env.Program(ast)
if err != nil {
return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal)
@ -108,6 +159,8 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (C
return CompilationResult{
Program: prog,
ExpressionAccessor: expressionAccessor,
UsesFieldSelector: usesFieldSelector,
UsesLabelSelector: usesLabelSelector,
}, nil
}
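
For illustration (hypothetical expressions, not taken from this diff), either form of field access flips the corresponding flag during the AST walk above:

request.resourceAttributes.labelSelector.rawSelector == 'app=example'   // simple select sets UsesLabelSelector
request.resourceAttributes.?fieldSelector.hasValue()                    // optional select sets UsesFieldSelector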
@ -161,7 +214,7 @@ func buildRequestType(field func(name string, declType *apiservercel.DeclType, r
// buildResourceAttributesType generates a DeclType for ResourceAttributes.
// if attributes are added here, also add to convertObjectToUnstructured.
func buildResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType {
return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields(
resourceAttributesFields := []*apiservercel.DeclField{
field("namespace", apiservercel.StringType, false),
field("verb", apiservercel.StringType, false),
field("group", apiservercel.StringType, false),
@ -169,6 +222,33 @@ func buildResourceAttributesType(field func(name string, declType *apiservercel.
field("resource", apiservercel.StringType, false),
field("subresource", apiservercel.StringType, false),
field("name", apiservercel.StringType, false),
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
resourceAttributesFields = append(resourceAttributesFields, field("fieldSelector", buildFieldSelectorType(field, fields), false))
resourceAttributesFields = append(resourceAttributesFields, field("labelSelector", buildLabelSelectorType(field, fields), false))
}
return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields(resourceAttributesFields...))
}
func buildFieldSelectorType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType {
return apiservercel.NewObjectType("kubernetes.FieldSelectorAttributes", fields(
field("rawSelector", apiservercel.StringType, false),
field("requirements", apiservercel.NewListType(buildSelectorRequirementType(field, fields), -1), false),
))
}
func buildLabelSelectorType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType {
return apiservercel.NewObjectType("kubernetes.LabelSelectorAttributes", fields(
field("rawSelector", apiservercel.StringType, false),
field("requirements", apiservercel.NewListType(buildSelectorRequirementType(field, fields), -1), false),
))
}
func buildSelectorRequirementType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType {
return apiservercel.NewObjectType("kubernetes.SelectorRequirement", fields(
field("key", apiservercel.StringType, false),
field("operator", apiservercel.StringType, false),
field("values", apiservercel.NewListType(apiservercel.StringType, -1), false),
))
}
@ -181,7 +261,7 @@ func buildNonResourceAttributesType(field func(name string, declType *apiserverc
))
}
func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) map[string]interface{} {
func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec, includeFieldSelector, includeLabelSelector bool) map[string]interface{} {
// Construct version containing every SubjectAccessReview user and string attribute field, even omitempty ones, for evaluation by CEL
extra := obj.Extra
if extra == nil {
@ -194,7 +274,7 @@ func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) m
"extra": extra,
}
if obj.ResourceAttributes != nil {
ret["resourceAttributes"] = map[string]string{
resourceAttributes := map[string]interface{}{
"namespace": obj.ResourceAttributes.Namespace,
"verb": obj.ResourceAttributes.Verb,
"group": obj.ResourceAttributes.Group,
@ -203,6 +283,42 @@ func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) m
"subresource": obj.ResourceAttributes.Subresource,
"name": obj.ResourceAttributes.Name,
}
if includeFieldSelector && obj.ResourceAttributes.FieldSelector != nil {
if len(obj.ResourceAttributes.FieldSelector.Requirements) > 0 {
requirements := make([]map[string]interface{}, 0, len(obj.ResourceAttributes.FieldSelector.Requirements))
for _, r := range obj.ResourceAttributes.FieldSelector.Requirements {
requirements = append(requirements, map[string]interface{}{
"key": r.Key,
"operator": r.Operator,
"values": r.Values,
})
}
resourceAttributes[fieldSelectorVarName] = map[string]interface{}{"requirements": requirements}
}
if len(obj.ResourceAttributes.FieldSelector.RawSelector) > 0 {
resourceAttributes[fieldSelectorVarName] = map[string]interface{}{"rawSelector": obj.ResourceAttributes.FieldSelector.RawSelector}
}
}
if includeLabelSelector && obj.ResourceAttributes.LabelSelector != nil {
if len(obj.ResourceAttributes.LabelSelector.Requirements) > 0 {
requirements := make([]map[string]interface{}, 0, len(obj.ResourceAttributes.LabelSelector.Requirements))
for _, r := range obj.ResourceAttributes.LabelSelector.Requirements {
requirements = append(requirements, map[string]interface{}{
"key": r.Key,
"operator": r.Operator,
"values": r.Values,
})
}
resourceAttributes[labelSelectorVarName] = map[string]interface{}{"requirements": requirements}
}
if len(obj.ResourceAttributes.LabelSelector.RawSelector) > 0 {
resourceAttributes[labelSelectorVarName] = map[string]interface{}{"rawSelector": obj.ResourceAttributes.LabelSelector.RawSelector}
}
}
ret["resourceAttributes"] = resourceAttributes
}
if obj.NonResourceAttributes != nil {
ret["nonResourceAttributes"] = map[string]string{

View File

@ -30,6 +30,11 @@ import (
type CELMatcher struct {
CompilationResults []CompilationResult
// These track if any expressions use fieldSelector and labelSelector,
// so construction of data passed to the CEL expression can be optimized if those fields are unused.
UsesLabelSelector bool
UsesFieldSelector bool
// These are optional fields which can be populated if metrics reporting is desired
Metrics MatcherMetrics
AuthorizerType string
@ -53,7 +58,7 @@ func (c *CELMatcher) Eval(ctx context.Context, r *authorizationv1.SubjectAccessR
}()
va := map[string]interface{}{
"request": convertObjectToUnstructured(&r.Spec),
"request": convertObjectToUnstructured(&r.Spec, c.UsesFieldSelector, c.UsesLabelSelector),
}
for _, compilationResult := range c.CompilationResults {
evalResult, _, err := compilationResult.Program.ContextEval(ctx, va)

View File

@ -20,6 +20,7 @@ import (
"fmt"
"strconv"
"sync"
"sync/atomic"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker"
@ -30,20 +31,29 @@ import (
"k8s.io/apimachinery/pkg/util/version"
celconfig "k8s.io/apiserver/pkg/apis/cel"
"k8s.io/apiserver/pkg/cel/library"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilversion "k8s.io/apiserver/pkg/util/version"
)
// DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet
// that guarantees compatibility with CEL features/libraries/parameters understood by
// an n-1 version
// the api server min compatibility version
//
// This default will be set to no more than n-1 the current Kubernetes major.minor version.
// This default will be set to no more than the current Kubernetes major.minor version.
//
// Note that a default version number less than n-1 indicates a wider range of version
// compatibility than strictly required for rollback. A wide range of compatibility is
// desirable because it means that CEL expressions are portable across a wider range
// of Kubernetes versions.
// Note that a default version number less than n-1 the current Kubernetes major.minor version
// indicates a wider range of version compatibility than strictly required for rollback.
// A wide range of compatibility is desirable because it means that CEL expressions are portable
// across a wider range of Kubernetes versions.
// A default version number equal to the current Kubernetes major.minor version
// indicates fast forward CEL features that can be used when rollback is no longer needed.
func DefaultCompatibilityVersion() *version.Version {
return version.MajorMinor(1, 29)
effectiveVer := utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent)
if effectiveVer == nil {
effectiveVer = utilversion.DefaultKubeEffectiveVersion()
}
return effectiveVer.MinCompatibilityVersion()
}
var baseOpts = append(baseOptsWithoutStrictCost, StrictCostOpt)
@ -132,6 +142,45 @@ var baseOptsWithoutStrictCost = []VersionedOptions{
library.CIDR(),
},
},
// Format Library
{
IntroducedVersion: version.MajorMinor(1, 31),
EnvOptions: []cel.EnvOption{
library.Format(),
},
},
// Authz selectors
{
IntroducedVersion: version.MajorMinor(1, 31),
FeatureEnabled: func() bool {
enabled := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors)
authzSelectorsLibraryInit.Do(func() {
// Record the first time feature enablement was checked for this library.
// This is checked from integration tests to ensure no cached cel envs
// are constructed before feature enablement is effectively set.
authzSelectorsLibraryEnabled.Store(enabled)
// Uncomment to debug where the first initialization is coming from if needed.
// debug.PrintStack()
})
return enabled
},
EnvOptions: []cel.EnvOption{
library.AuthzSelectors(),
},
},
}
var (
authzSelectorsLibraryInit sync.Once
authzSelectorsLibraryEnabled atomic.Value
)
// AuthzSelectorsLibraryEnabled returns whether the AuthzSelectors library was enabled when it was constructed.
// If it has not been constructed yet, this returns `false, false`.
// This is solely for the benefit of the integration tests making sure feature gates get correctly parsed before AuthzSelector ever has to check for enablement.
func AuthzSelectorsLibraryEnabled() (enabled, constructed bool) {
enabled, constructed = authzSelectorsLibraryEnabled.Load().(bool)
return
}
var StrictCostOpt = VersionedOptions{

View File

@ -175,7 +175,15 @@ type VersionedOptions struct {
//
// Optional.
RemovedVersion *version.Version
// FeatureEnabled returns true if these options are enabled by feature gates,
// and returns false if these options are not enabled due to feature gates.
//
// This takes priority over IntroducedVersion / RemovedVersion for the NewExpressions environment.
//
// The StoredExpressions environment ignores this function.
//
// Optional.
FeatureEnabled func() bool
// EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a
// cel.Library, or to enable other CEL EnvOptions such as language settings.
//
@ -210,7 +218,7 @@ type VersionedOptions struct {
// making multiple calls to Extend.
func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
if len(options) > 0 {
newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, options)
newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, true, options)
if err != nil {
return nil, err
}
@ -218,7 +226,7 @@ func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
if err != nil {
return nil, err
}
storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), options)
storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), false, options)
if err != nil {
return nil, err
}
@ -231,13 +239,26 @@ func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
return e, nil
}
func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, opts []VersionedOptions) (cel.EnvOption, error) {
func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, honorFeatureGateEnablement bool, opts []VersionedOptions) (cel.EnvOption, error) {
var envOpts []cel.EnvOption
var progOpts []cel.ProgramOption
var declTypes []*apiservercel.DeclType
for _, opt := range opts {
var allowedByFeatureGate, allowedByVersion bool
if opt.FeatureEnabled != nil && honorFeatureGateEnablement {
// Feature-gate-enabled libraries must follow compatible default feature enablement.
// Enabling alpha features in their first release enables libraries the previous API server is unaware of.
allowedByFeatureGate = opt.FeatureEnabled()
if !allowedByFeatureGate {
continue
}
}
if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) {
allowedByVersion = true
}
if allowedByFeatureGate || allowedByVersion {
envOpts = append(envOpts, opt.EnvOptions...)
progOpts = append(progOpts, opt.ProgramOptions...)
declTypes = append(declTypes, opt.DeclTypes...)
@ -246,7 +267,10 @@ func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, o
if len(declTypes) > 0 {
provider := apiservercel.NewDeclTypeProvider(declTypes...)
providerOpts, err := provider.EnvOptions(base.TypeProvider())
if compatVer.AtLeast(version.MajorMinor(1, 31)) {
provider.SetRecognizeKeywordAsFieldName(true)
}
providerOpts, err := provider.EnvOptions(base.CELTypeProvider())
if err != nil {
return nil, err
}

View File

@ -16,11 +16,46 @@ limitations under the License.
package cel
import (
"fmt"
"github.com/google/cel-go/cel"
)
// ErrInternal is the basic error that occurs when the expression fails to evaluate
// due to internal reasons. Any Error that has the Type of
// ErrorInternal is considered equal to ErrInternal.
var ErrInternal = fmt.Errorf("internal")
// ErrInvalid is the basic error that occurs when the expression fails to
// evaluate but not due to internal reasons. Any Error that has the Type of
// ErrorInvalid is considered equal to ErrInvalid.
var ErrInvalid = fmt.Errorf("invalid")
// ErrRequired is the basic error that occurs when the expression is required
// but absent.
// Any Error that has the Type of ErrorRequired is considered equal
// to ErrRequired.
var ErrRequired = fmt.Errorf("required")
// ErrCompilation is the basic error that occurs when the expression fails to
// compile. Any CompilationError wraps ErrCompilation.
// ErrCompilation wraps ErrInvalid
var ErrCompilation = fmt.Errorf("%w: compilation error", ErrInvalid)
// ErrOutOfBudget is the basic error that occurs when the expression fails due to
// exceeding budget.
var ErrOutOfBudget = fmt.Errorf("out of budget")
// Error is an implementation of the 'error' interface, which represents an
// XValidation error.
type Error struct {
Type ErrorType
Detail string
// Cause is an optional wrapped error that can be useful for
// programmatically retrieving detailed errors.
Cause error
}
var _ error = &Error{}
@ -30,7 +65,24 @@ func (v *Error) Error() string {
return v.Detail
}
// ErrorType is a machine readable value providing more detail about why
func (v *Error) Is(err error) bool {
switch v.Type {
case ErrorTypeRequired:
return err == ErrRequired
case ErrorTypeInvalid:
return err == ErrInvalid
case ErrorTypeInternal:
return err == ErrInternal
}
return false
}
// Unwrap returns the wrapped Cause.
func (v *Error) Unwrap() error {
return v.Cause
}
// ErrorType is a machine-readable value providing more detail about why
// an XValidation is invalid.
type ErrorType string
@ -45,3 +97,28 @@ const (
// to user input. See InternalError().
ErrorTypeInternal ErrorType = "InternalError"
)
// CompilationError indicates an error during expression compilation.
// It wraps ErrCompilation.
type CompilationError struct {
err *Error
Issues *cel.Issues
}
// NewCompilationError wraps a cel.Issues to indicate a compilation failure.
func NewCompilationError(issues *cel.Issues) *CompilationError {
return &CompilationError{
Issues: issues,
err: &Error{
Type: ErrorTypeInvalid,
Detail: fmt.Sprintf("compilation error: %s", issues),
}}
}
func (e *CompilationError) Error() string {
return e.err.Error()
}
func (e *CompilationError) Unwrap() []error {
return []error{e.err, ErrCompilation}
}
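
With the Is and Unwrap methods above, callers can rely on the standard errors package instead of switching on ErrorType. A small sketch (not part of this diff):

package main

import (
	"errors"
	"fmt"

	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	err := &apiservercel.Error{
		Type:   apiservercel.ErrorTypeInvalid,
		Detail: "compilation failed: undeclared reference to 'foo'",
	}
	// Error.Is maps ErrorTypeInvalid to ErrInvalid, so this prints true.
	fmt.Println(errors.Is(err, apiservercel.ErrInvalid))
	// ErrCompilation itself wraps ErrInvalid, so this also prints true.
	fmt.Println(errors.Is(apiservercel.ErrCompilation, apiservercel.ErrInvalid))
}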

73
vendor/k8s.io/apiserver/pkg/cel/format.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cel
import (
"fmt"
"reflect"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
)
var (
FormatObject = decls.NewObjectType("kubernetes.NamedFormat")
FormatType = cel.ObjectType("kubernetes.NamedFormat")
)
// Format provides a CEL representation of a Kubernetes format
type Format struct {
Name string
ValidateFunc func(string) []string
// Size of the regex string or estimated equivalent regex string used
// for cost estimation
MaxRegexSize int
}
func (d *Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
return nil, fmt.Errorf("type conversion error from 'Format' to '%v'", typeDesc)
}
func (d *Format) ConvertToType(typeVal ref.Type) ref.Val {
switch typeVal {
case FormatType:
return d
case types.TypeType:
return FormatType
default:
return types.NewErr("type conversion error from '%s' to '%s'", FormatType, typeVal)
}
}
func (d *Format) Equal(other ref.Val) ref.Val {
otherDur, ok := other.(*Format)
if !ok {
return types.MaybeNoSuchOverloadErr(other)
}
return types.Bool(d.Name == otherDur.Name)
}
func (d *Format) Type() ref.Type {
return FormatType
}
func (d *Format) Value() interface{} {
return d
}

View File

@ -22,6 +22,11 @@ import (
"reflect"
"strings"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
@ -194,6 +199,30 @@ import (
// Examples:
//
// authorizer.group('').resource('pods').namespace('default').check('create').error()
//
// fieldSelector
//
// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check.
// If the field selector does not parse successfully, no field selector requirements are included in the authorization check.
// Added in Kubernetes 1.31+, Authz library version 1.
//
// <ResourceCheck>.fieldSelector(<string>) <ResourceCheck>
//
// Examples:
//
// authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()
//
// labelSelector (added in v1, Kubernetes 1.31+)
//
// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check.
// If the label selector does not parse successfully, no label selector requirements are included in the authorization check.
// Added in Kubernetes 1.31+, Authz library version 1.
//
// <ResourceCheck>.labelSelector(<string>) <ResourceCheck>
//
// Examples:
//
// authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed()
func Authz() cel.EnvOption {
return cel.Lib(authzLib)
}
@ -259,6 +288,66 @@ func (*authz) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
// AuthzSelectors provides a CEL function library extension for adding fieldSelector and
// labelSelector filters to authorization checks. This requires the Authz library.
// See documentation of the Authz library for use and availability of the authorizer variable.
//
// fieldSelector
//
// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check.
// If the field selector does not parse successfully, no field selector requirements are included in the authorization check.
// Added in Kubernetes 1.31+.
//
// <ResourceCheck>.fieldSelector(<string>) <ResourceCheck>
//
// Examples:
//
// authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()
//
// labelSelector
//
// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check.
// If the label selector does not parse successfully, no label selector requirements are included in the authorization check.
// Added in Kubernetes 1.31+.
//
// <ResourceCheck>.labelSelector(<string>) <ResourceCheck>
//
// Examples:
//
// authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed()
func AuthzSelectors() cel.EnvOption {
return cel.Lib(authzSelectorsLib)
}
var authzSelectorsLib = &authzSelectors{}
type authzSelectors struct{}
func (*authzSelectors) LibraryName() string {
return "k8s.authzSelectors"
}
var authzSelectorsLibraryDecls = map[string][]cel.FunctionOpt{
"fieldSelector": {
cel.MemberOverload("authorizer_fieldselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
cel.BinaryBinding(resourceCheckFieldSelector))},
"labelSelector": {
cel.MemberOverload("authorizer_labelselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
cel.BinaryBinding(resourceCheckLabelSelector))},
}
func (*authzSelectors) CompileOptions() []cel.EnvOption {
options := make([]cel.EnvOption, 0, len(authzSelectorsLibraryDecls))
for name, overloads := range authzSelectorsLibraryDecls {
options = append(options, cel.Function(name, overloads...))
}
return options
}
func (*authzSelectors) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
func authorizerPath(arg1, arg2 ref.Val) ref.Val {
authz, ok := arg1.(authorizerVal)
if !ok {
@ -354,6 +443,38 @@ func resourceCheckSubresource(arg1, arg2 ref.Val) ref.Val {
return result
}
func resourceCheckFieldSelector(arg1, arg2 ref.Val) ref.Val {
resourceCheck, ok := arg1.(resourceCheckVal)
if !ok {
return types.MaybeNoSuchOverloadErr(arg1)
}
fieldSelector, ok := arg2.Value().(string)
if !ok {
return types.MaybeNoSuchOverloadErr(arg1)
}
result := resourceCheck
result.fieldSelector = fieldSelector
return result
}
func resourceCheckLabelSelector(arg1, arg2 ref.Val) ref.Val {
resourceCheck, ok := arg1.(resourceCheckVal)
if !ok {
return types.MaybeNoSuchOverloadErr(arg1)
}
labelSelector, ok := arg2.Value().(string)
if !ok {
return types.MaybeNoSuchOverloadErr(arg1)
}
result := resourceCheck
result.labelSelector = labelSelector
return result
}
func resourceCheckNamespace(arg1, arg2 ref.Val) ref.Val {
resourceCheck, ok := arg1.(resourceCheckVal)
if !ok {
@ -544,11 +665,13 @@ func (g groupCheckVal) resourceCheck(resource string) resourceCheckVal {
type resourceCheckVal struct {
receiverOnlyObjectVal
groupCheck groupCheckVal
resource string
subresource string
namespace string
name string
groupCheck groupCheckVal
resource string
subresource string
namespace string
name string
fieldSelector string
labelSelector string
}
func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val {
@ -563,6 +686,26 @@ func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val {
Verb: verb,
User: a.groupCheck.authorizer.userInfo,
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
if len(a.fieldSelector) > 0 {
selector, err := fields.ParseSelector(a.fieldSelector)
if err != nil {
attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = nil, err
} else {
attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = selector.Requirements(), nil
}
}
if len(a.labelSelector) > 0 {
requirements, err := labels.ParseToRequirements(a.labelSelector)
if err != nil {
attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = nil, err
} else {
attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = requirements, nil
}
}
}
decision, reason, err := a.groupCheck.authorizer.authAuthorizer.Authorize(ctx, attr)
return newDecision(decision, err, reason)
}

View File

@ -17,16 +17,37 @@ limitations under the License.
package library
import (
"fmt"
"math"
"github.com/google/cel-go/checker"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
"k8s.io/apiserver/pkg/cel"
)
// panicOnUnknown makes cost estimate functions panic on unrecognized functions.
// This is only set to true for unit tests.
var panicOnUnknown = false
// knownUnhandledFunctions is a list of functions used in cost tests that are not handled by CostEstimator.
var knownUnhandledFunctions = map[string]bool{
"uint": true,
"duration": true,
"bytes": true,
"timestamp": true,
"value": true,
"_==_": true,
"_&&_": true,
"_>_": true,
"!_": true,
"strings.quote": true,
}
// CostEstimator implements CEL's interpretable.ActualCostEstimator and checker.CostEstimator.
type CostEstimator struct {
// SizeEstimator provides a CostEstimator.EstimateSize that this CostEstimator will delegate size estimation
@ -34,6 +55,25 @@ type CostEstimator struct {
SizeEstimator checker.CostEstimator
}
const (
// shortest repeatable selector requirement that allocates a values slice is 2 characters: k,
selectorLengthToRequirementCount = float64(.5)
// the expensive parts to represent each requirement are a struct and a values slice
costPerRequirement = float64(common.ListCreateBaseCost + common.StructCreateBaseCost)
)
// a selector consists of a list of requirements held in a slice
var baseSelectorCost = checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
func selectorCostEstimate(selectorLength checker.SizeEstimate) checker.CostEstimate {
parseCost := selectorLength.MultiplyByCostFactor(common.StringTraversalCostFactor)
requirementCount := selectorLength.MultiplyByCostFactor(selectorLengthToRequirementCount)
requirementCost := requirementCount.MultiplyByCostFactor(costPerRequirement)
return baseSelectorCost.Add(parseCost).Add(requirementCost)
}
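
For a rough sense of scale (assuming cel-go's default cost constants of ListCreateBaseCost = 10, StructCreateBaseCost = 40, and StringTraversalCostFactor = 0.1, which are assumptions here rather than values shown in this diff), selectorCostEstimate for a 20-character selector works out to roughly:

    parse cost:        20 * 0.1       = 2
    requirement count: 20 * 0.5       = 10
    requirement cost:  10 * (10 + 40) = 500
    total:             10 + 2 + 500   ≈ 512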
func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, result ref.Val) *uint64 {
switch function {
case "check":
@ -45,6 +85,13 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re
// All authorization builder and accessor functions have a nominal cost
cost := uint64(1)
return &cost
case "fieldSelector", "labelSelector":
// field and label selector parse is a string parse into a structured set of requirements
if len(args) >= 2 {
selectorLength := actualSize(args[1])
cost := selectorCostEstimate(checker.SizeEstimate{Min: selectorLength, Max: selectorLength})
return &cost.Max
}
case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
var cost uint64
if len(args) > 0 {
@ -105,7 +152,7 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re
cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor))
return &cost
}
case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast":
case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast":
// IP and CIDR accessors are nominal cost.
cost := uint64(1)
return &cost
@ -152,9 +199,45 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re
cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
return &cost
}
case "validate":
if len(args) >= 2 {
format, isFormat := args[0].Value().(*cel.Format)
if isFormat {
strSize := actualSize(args[1])
// Don't have access to the underlying regex, so estimate a long regexp
regexSize := format.MaxRegexSize
// Copied from CEL implementation for regex cost
//
// https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
// in case where string is empty but regex is still expensive.
strCost := uint64(math.Ceil((1.0 + float64(strSize)) * common.StringTraversalCostFactor))
// We don't know how many expressions are in the regex, just the string length (a huge
// improvement here would be to somehow count the number of expressions in the regex or
// how many states are in the regex state machine and use that to measure regex cost).
// For now, we're making a guess that each expression in a regex is typically at least 4 chars
// in length.
regexCost := uint64(math.Ceil(float64(regexSize) * common.RegexStringLengthCostFactor))
cost := strCost * regexCost
return &cost
}
}
case "format.named":
// Simply a dictionary lookup
cost := uint64(1)
return &cost
case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub":
cost := uint64(1)
return &cost
case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery":
// url accessors
cost := uint64(1)
return &cost
}
if panicOnUnknown && !knownUnhandledFunctions[function] {
panic(fmt.Errorf("CallCost: unhandled function %q or args %v", function, args))
}
return nil
}
@ -170,6 +253,11 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch
case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
// All authorization builder and accessor functions have a nominal cost
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
case "fieldSelector", "labelSelector":
// field and label selector parse is a string parse into a structured set of requirements
if len(args) == 1 {
return &checker.CallEstimate{CostEstimate: selectorCostEstimate(l.sizeEstimate(args[0]))}
}
case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
if target != nil {
// Charge 1 cost for comparing each element in the list
@ -255,8 +343,10 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch
// Worst case size is when a separator of "" is used, and each char is returned as a list element.
max := sz.Max
if len(args) > 1 {
if c := args[1].Expr().GetConstExpr(); c != nil {
max = uint64(c.GetInt64Value())
if v := args[1].Expr().AsLiteral(); v != nil {
if i, ok := v.Value().(int64); ok {
max = uint64(i)
}
}
}
// Cost is the traversal plus the construction of the result.
@ -327,7 +417,7 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch
// So we double the cost of parsing the string.
return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor)}
}
case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast":
case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast":
// IP and CIDR accessors are nominal cost.
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
case "containsIP":
@ -373,8 +463,21 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch
sz := l.sizeEstimate(args[0])
return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
}
case "validate":
if target != nil {
sz := l.sizeEstimate(args[0])
return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).MultiplyByCostFactor(cel.MaxNameFormatRegexSize * common.RegexStringLengthCostFactor)}
}
case "format.named":
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub":
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery":
// url accessors
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
}
if panicOnUnknown && !knownUnhandledFunctions[function] {
panic(fmt.Errorf("EstimateCallCost: unhandled function %q, target %v, args %v", function, target, args))
}
return nil
}
@ -383,6 +486,10 @@ func actualSize(value ref.Val) uint64 {
if sz, ok := value.(traits.Sizer); ok {
return uint64(sz.Size().(types.Int))
}
if panicOnUnknown {
// debug.PrintStack()
panic(fmt.Errorf("actualSize: non-sizer type %T", value))
}
return 1
}
@ -425,7 +532,7 @@ func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstim
type itemsNode struct {
path []string
t *types.Type
expr *exprpb.Expr
expr ast.Expr
}
func (i *itemsNode) Path() []string {
@ -436,7 +543,7 @@ func (i *itemsNode) Type() *types.Type {
return i.t
}
func (i *itemsNode) Expr() *exprpb.Expr {
func (i *itemsNode) Expr() ast.Expr {
return i.expr
}
@ -444,6 +551,8 @@ func (i *itemsNode) ComputedSize() *checker.SizeEstimate {
return nil
}
var _ checker.AstNode = (*itemsNode)(nil)
// traversalCost computes the cost of traversing a ref.Val as a data tree.
func traversalCost(v ref.Val) uint64 {
// TODO: This could potentially be optimized by sampling maps and lists instead of traversing.

270
vendor/k8s.io/apiserver/pkg/cel/library/format.go generated vendored Normal file
View File

@ -0,0 +1,270 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package library
import (
"fmt"
"net/url"
"github.com/asaskevich/govalidator"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/decls"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/util/validation"
apiservercel "k8s.io/apiserver/pkg/cel"
"k8s.io/kube-openapi/pkg/validation/strfmt"
)
// Format provides a CEL library exposing common named Kubernetes string
// validations. Can be used in CRD ValidationRules messageExpression.
//
// Example:
//
// rule: format.dns1123label.validate(object.metadata.name).hasValue()
// messageExpression: format.dns1123label.validate(object.metadata.name).value().join("\n")
//
// format.named(name: string) -> ?Format
//
// Returns the Format with the given name, if it exists. Otherwise, optional.none
// Allowed names are:
// - `dns1123Label`
// - `dns1123Subdomain`
// - `dns1035Label`
// - `qualifiedName`
// - `dns1123LabelPrefix`
// - `dns1123SubdomainPrefix`
// - `dns1035LabelPrefix`
// - `labelValue`
// - `uri`
// - `uuid`
// - `byte`
// - `date`
// - `datetime`
//
// format.<formatName>() -> Format
//
// Convenience functions for all the named formats are also available
//
// Examples:
// format.dns1123Label().validate("my-label-name")
// format.dns1123Subdomain().validate("apiextensions.k8s.io")
// format.dns1035Label().validate("my-label-name")
// format.qualifiedName().validate("apiextensions.k8s.io/v1beta1")
// format.dns1123LabelPrefix().validate("my-label-prefix-")
// format.dns1123SubdomainPrefix().validate("mysubdomain.prefix.-")
// format.dns1035LabelPrefix().validate("my-label-prefix-")
// format.uri().validate("http://example.com")
// Uses the same pattern as isURL, but returns an error
// format.uuid().validate("123e4567-e89b-12d3-a456-426614174000")
// format.byte().validate("aGVsbG8=")
// format.date().validate("2021-01-01")
// format.datetime().validate("2021-01-01T00:00:00Z")
//
// <Format>.validate(str: string) -> ?list<string>
//
// Validates the given string against the given format. Returns optional.none
// if the string is valid, otherwise a list of validation error strings.
func Format() cel.EnvOption {
return cel.Lib(formatLib)
}
var formatLib = &format{}
type format struct{}
func (*format) LibraryName() string {
return "format"
}
func ZeroArgumentFunctionBinding(binding func() ref.Val) decls.OverloadOpt {
return func(o *decls.OverloadDecl) (*decls.OverloadDecl, error) {
wrapped, err := decls.FunctionBinding(func(values ...ref.Val) ref.Val { return binding() })(o)
if err != nil {
return nil, err
}
if len(wrapped.ArgTypes()) != 0 {
return nil, fmt.Errorf("function binding must have 0 arguments")
}
return o, nil
}
}
func (*format) CompileOptions() []cel.EnvOption {
options := make([]cel.EnvOption, 0, len(formatLibraryDecls))
for name, overloads := range formatLibraryDecls {
options = append(options, cel.Function(name, overloads...))
}
for name, constantValue := range ConstantFormats {
prefixedName := "format." + name
options = append(options, cel.Function(prefixedName, cel.Overload(prefixedName, []*cel.Type{}, apiservercel.FormatType, ZeroArgumentFunctionBinding(func() ref.Val {
return constantValue
}))))
}
return options
}
func (*format) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
var ConstantFormats map[string]*apiservercel.Format = map[string]*apiservercel.Format{
"dns1123Label": {
Name: "DNS1123Label",
ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, false) },
MaxRegexSize: 30,
},
"dns1123Subdomain": {
Name: "DNS1123Subdomain",
ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, false) },
MaxRegexSize: 60,
},
"dns1035Label": {
Name: "DNS1035Label",
ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, false) },
MaxRegexSize: 30,
},
"qualifiedName": {
Name: "QualifiedName",
ValidateFunc: validation.IsQualifiedName,
MaxRegexSize: 60, // uses subdomain regex
},
"dns1123LabelPrefix": {
Name: "DNS1123LabelPrefix",
ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, true) },
MaxRegexSize: 30,
},
"dns1123SubdomainPrefix": {
Name: "DNS1123SubdomainPrefix",
ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, true) },
MaxRegexSize: 60,
},
"dns1035LabelPrefix": {
Name: "DNS1035LabelPrefix",
ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, true) },
MaxRegexSize: 30,
},
"labelValue": {
Name: "LabelValue",
ValidateFunc: validation.IsValidLabelValue,
MaxRegexSize: 40,
},
// CRD formats
// Implementations sourced from strfmt, which kube-openapi uses as its
// format library. There are other CRD formats supported, but they are
// covered by other portions of the CEL library (like IP/CIDR), or their
// use is discouraged (like bsonobjectid, email, etc)
"uri": {
Name: "URI",
ValidateFunc: func(s string) []string {
// Directly call ParseRequestURI since we can get a better error message
_, err := url.ParseRequestURI(s)
if err != nil {
return []string{err.Error()}
}
return nil
},
// Use govalidator url regex to estimate, since ParseRequestURI
// doesn't use a regex
MaxRegexSize: len(govalidator.URL),
},
"uuid": {
Name: "uuid",
ValidateFunc: func(s string) []string {
if !strfmt.Default.Validates("uuid", s) {
return []string{"does not match the UUID format"}
}
return nil
},
MaxRegexSize: len(strfmt.UUIDPattern),
},
"byte": {
Name: "byte",
ValidateFunc: func(s string) []string {
if !strfmt.Default.Validates("byte", s) {
return []string{"invalid base64"}
}
return nil
},
MaxRegexSize: len(govalidator.Base64),
},
"date": {
Name: "date",
ValidateFunc: func(s string) []string {
if !strfmt.Default.Validates("date", s) {
return []string{"invalid date"}
}
return nil
},
// Estimated regex size for RFC3339FullDate, which is
// a date format. Assume a date-time pattern is longer,
// so use that as a conservative estimate here.
MaxRegexSize: len(strfmt.DateTimePattern),
},
"datetime": {
Name: "datetime",
ValidateFunc: func(s string) []string {
if !strfmt.Default.Validates("datetime", s) {
return []string{"invalid datetime"}
}
return nil
},
MaxRegexSize: len(strfmt.DateTimePattern),
},
}
var formatLibraryDecls = map[string][]cel.FunctionOpt{
"validate": {
cel.MemberOverload("format-validate", []*cel.Type{apiservercel.FormatType, cel.StringType}, cel.OptionalType(cel.ListType(cel.StringType)), cel.BinaryBinding(formatValidate)),
},
"format.named": {
cel.Overload("format-named", []*cel.Type{cel.StringType}, cel.OptionalType(apiservercel.FormatType), cel.UnaryBinding(func(name ref.Val) ref.Val {
nameString, ok := name.Value().(string)
if !ok {
return types.MaybeNoSuchOverloadErr(name)
}
f, ok := ConstantFormats[nameString]
if !ok {
return types.OptionalNone
}
return types.OptionalOf(f)
})),
},
}
func formatValidate(arg1, arg2 ref.Val) ref.Val {
f, ok := arg1.Value().(*apiservercel.Format)
if !ok {
return types.MaybeNoSuchOverloadErr(arg1)
}
str, ok := arg2.Value().(string)
if !ok {
return types.MaybeNoSuchOverloadErr(arg2)
}
res := f.ValidateFunc(str)
if len(res) == 0 {
return types.OptionalNone
}
return types.OptionalOf(types.NewStringList(types.DefaultTypeAdapter, res))
}
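
Because format.named returns optional.none for unknown names and validate returns optional.none for valid input, expressions can branch on presence. Illustrative CEL (hypothetical inputs):

format.named('dns1123Label').hasValue()                  // true: the format is registered
format.named('no-such-format').hasValue()                // false: unknown names yield optional.none
format.dns1123Label().validate('My_Label').hasValue()    // true: the string is invalid, so a list of errors is returned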

View File

@ -47,4 +47,6 @@ const (
MinBoolSize = 4
// MinNumberSize is the length of literal 0
MinNumberSize = 1
MaxNameFormatRegexSize = 128
)

View File

@ -50,7 +50,7 @@ func (d Quantity) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
func (d Quantity) ConvertToType(typeVal ref.Type) ref.Val {
switch typeVal {
case typeValue:
case quantityTypeValue:
return d
case types.TypeType:
return quantityTypeValue

View File

@ -27,7 +27,7 @@ import (
"github.com/google/cel-go/common/types/traits"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
"google.golang.org/protobuf/proto"
"k8s.io/apimachinery/pkg/api/resource"
)
const (
@ -348,9 +348,14 @@ func NewDeclTypeProvider(rootTypes ...*DeclType) *DeclTypeProvider {
// DeclTypeProvider extends the CEL ref.TypeProvider interface and provides an Open API Schema-based
// type-system.
type DeclTypeProvider struct {
registeredTypes map[string]*DeclType
typeProvider ref.TypeProvider
typeAdapter ref.TypeAdapter
registeredTypes map[string]*DeclType
typeProvider types.Provider
typeAdapter types.Adapter
recognizeKeywordAsFieldName bool
}
func (rt *DeclTypeProvider) SetRecognizeKeywordAsFieldName(recognize bool) {
rt.recognizeKeywordAsFieldName = recognize
}
func (rt *DeclTypeProvider) EnumValue(enumName string) ref.Val {
@ -365,7 +370,7 @@ func (rt *DeclTypeProvider) FindIdent(identName string) (ref.Val, bool) {
// as well as a custom ref.TypeProvider.
//
// If the DeclTypeProvider value is nil, an empty []cel.EnvOption set is returned.
func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, error) {
func (rt *DeclTypeProvider) EnvOptions(tp types.Provider) ([]cel.EnvOption, error) {
if rt == nil {
return []cel.EnvOption{}, nil
}
@ -381,54 +386,52 @@ func (rt *DeclTypeProvider) EnvOptions(tp ref.TypeProvider) ([]cel.EnvOption, er
// WithTypeProvider returns a new DeclTypeProvider that sets the given TypeProvider
// If the original DeclTypeProvider is nil, the returned DeclTypeProvider is still nil.
func (rt *DeclTypeProvider) WithTypeProvider(tp ref.TypeProvider) (*DeclTypeProvider, error) {
func (rt *DeclTypeProvider) WithTypeProvider(tp types.Provider) (*DeclTypeProvider, error) {
if rt == nil {
return nil, nil
}
var ta ref.TypeAdapter = types.DefaultTypeAdapter
tpa, ok := tp.(ref.TypeAdapter)
var ta types.Adapter = types.DefaultTypeAdapter
tpa, ok := tp.(types.Adapter)
if ok {
ta = tpa
}
rtWithTypes := &DeclTypeProvider{
typeProvider: tp,
typeAdapter: ta,
registeredTypes: rt.registeredTypes,
typeProvider: tp,
typeAdapter: ta,
registeredTypes: rt.registeredTypes,
recognizeKeywordAsFieldName: rt.recognizeKeywordAsFieldName,
}
for name, declType := range rt.registeredTypes {
tpType, found := tp.FindType(name)
expT, err := declType.ExprType()
if err != nil {
return nil, fmt.Errorf("fail to get cel type: %s", err)
}
if found && !proto.Equal(tpType, expT) {
tpType, found := tp.FindStructType(name)
// cast celType to types.type
expT := declType.CelType()
if found && !expT.IsExactType(tpType) {
return nil, fmt.Errorf(
"type %s definition differs between CEL environment and type provider", name)
}
}
return rtWithTypes, nil
}
// FindType attempts to resolve the typeName provided from the rule's rule-schema, or if not
// FindStructType attempts to resolve the typeName provided from the rule's rule-schema, or if not
// from the embedded ref.TypeProvider.
//
// FindType overrides the default type-finding behavior of the embedded TypeProvider.
// FindStructType overrides the default type-finding behavior of the embedded TypeProvider.
//
// Note, when the type name is based on the Open API Schema, the name will reflect the object path
// where the type definition appears.
func (rt *DeclTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
func (rt *DeclTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
if rt == nil {
return nil, false
}
declType, found := rt.findDeclType(typeName)
if found {
expT, err := declType.ExprType()
if err != nil {
return expT, false
}
expT := declType.CelType()
return expT, found
}
return rt.typeProvider.FindType(typeName)
return rt.typeProvider.FindStructType(typeName)
}
// FindDeclType returns the CPT type description which can be mapped to a CEL type.
@ -439,37 +442,41 @@ func (rt *DeclTypeProvider) FindDeclType(typeName string) (*DeclType, bool) {
return rt.findDeclType(typeName)
}
// FindFieldType returns a field type given a type name and field name, if found.
// FindStructFieldNames returns the field names associated with the type, if the type
// is found.
func (rt *DeclTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
return []string{}, false
}
// FindStructFieldType returns a field type given a type name and field name, if found.
//
// Note, the type name for an Open API Schema type is likely to be its qualified object path.
// If, in the future an object instance rather than a type name were provided, the field
// resolution might more accurately reflect the expected type model. However, in this case
// concessions were made to align with the existing CEL interfaces.
func (rt *DeclTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) {
func (rt *DeclTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) {
st, found := rt.findDeclType(typeName)
if !found {
return rt.typeProvider.FindFieldType(typeName, fieldName)
return rt.typeProvider.FindStructFieldType(typeName, fieldName)
}
f, found := st.Fields[fieldName]
if rt.recognizeKeywordAsFieldName && !found && celReservedSymbols.Has(fieldName) {
f, found = st.Fields["__"+fieldName+"__"]
}
if found {
ft := f.Type
expT, err := ft.ExprType()
if err != nil {
return nil, false
}
return &ref.FieldType{
expT := ft.CelType()
return &types.FieldType{
Type: expT,
}, true
}
// This could be a dynamic map.
if st.IsMap() {
et := st.ElemType
expT, err := et.ExprType()
if err != nil {
return nil, false
}
return &ref.FieldType{
expT := et.CelType()
return &types.FieldType{
Type: expT,
}, true
}
@ -576,6 +583,10 @@ var (
// labeled as Timestamp will necessarily have the same MinSerializedSize.
TimestampType = NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize)
// QuantityDeclType wraps a [QuantityType] and makes it usable with functions that expect
// a [DeclType].
QuantityDeclType = NewSimpleTypeWithMinSize("quantity", QuantityType, Quantity{Quantity: resource.NewQuantity(0, resource.DecimalSI)}, 8)
// UintType is equivalent to the CEL 'uint' type.
UintType = NewSimpleTypeWithMinSize("uint", cel.UintType, types.Uint(0), 1)

View File

@ -22,6 +22,11 @@ import (
"net/http"
"time"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/runtime"
@ -117,5 +122,31 @@ func GetAuthorizerAttributes(ctx context.Context) (authorizer.Attributes, error)
attribs.Namespace = requestInfo.Namespace
attribs.Name = requestInfo.Name
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
// parsing here makes it easy to keep the AttributesRecord type value-only and avoids any mutex copies when
// doing shallow copies in other steps.
if len(requestInfo.FieldSelector) > 0 {
fieldSelector, err := fields.ParseSelector(requestInfo.FieldSelector)
if err != nil {
attribs.FieldSelectorRequirements, attribs.FieldSelectorParsingErr = nil, err
} else {
if requirements := fieldSelector.Requirements(); len(requirements) > 0 {
attribs.FieldSelectorRequirements, attribs.FieldSelectorParsingErr = fieldSelector.Requirements(), nil
}
}
}
if len(requestInfo.LabelSelector) > 0 {
labelSelector, err := labels.Parse(requestInfo.LabelSelector)
if err != nil {
attribs.LabelSelectorRequirements, attribs.LabelSelectorParsingErr = nil, err
} else {
if requirements, _ /*selectable*/ := labelSelector.Requirements(); len(requirements) > 0 {
attribs.LabelSelectorRequirements, attribs.LabelSelectorParsingErr = requirements, nil
}
}
}
}
return &attribs, nil
}

View File

@ -22,6 +22,7 @@ import (
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
"k8s.io/apiserver/pkg/endpoints/request"
tracing "k8s.io/component-base/tracing"
)
@ -32,6 +33,14 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler {
otelhttp.WithPropagators(tracing.Propagators()),
otelhttp.WithPublicEndpoint(),
otelhttp.WithTracerProvider(tp),
otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
ctx := r.Context()
info, exist := request.RequestInfoFrom(ctx)
if !exist || !info.IsResourceRequest {
return r.Method
}
return getSpanNameFromRequestInfo(info, r)
}),
}
wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Add the http.target attribute to the otelhttp span
@ -45,3 +54,22 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler {
// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...)
}
func getSpanNameFromRequestInfo(info *request.RequestInfo, r *http.Request) string {
spanName := "/" + info.APIPrefix
if info.APIGroup != "" {
spanName += "/" + info.APIGroup
}
spanName += "/" + info.APIVersion
if info.Namespace != "" {
spanName += "/namespaces/{:namespace}"
}
spanName += "/" + info.Resource
if info.Name != "" {
spanName += "/" + "{:name}"
}
if info.Subresource != "" {
spanName += "/" + info.Subresource
}
return r.Method + " " + spanName
}
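
As an illustration of the formatter above (derived from the code, not taken from this diff), a GET of /api/v1/namespaces/default/pods/mypod yields the span name:

GET /api/v1/namespaces/{:namespace}/pods/{:name}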

View File

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/endpoints/metrics"
@ -51,7 +52,7 @@ func getAvoidTimestampEqualities() conversion.Equalities {
}
var eqs = equality.Semantic.Copy()
err := eqs.AddFunc(
err := eqs.AddFuncs(
func(a, b metav1.ManagedFieldsEntry) bool {
// Two objects' managed fields are equivalent if, ignoring timestamp,
// the objects are deeply equal.
@ -59,6 +60,14 @@ func getAvoidTimestampEqualities() conversion.Equalities {
b.Time = nil
return reflect.DeepEqual(a, b)
},
func(a, b unstructured.Unstructured) bool {
// Check if the managed fields are equal by converting to structured types and leveraging the above
// function, then, ignoring the managed fields, check the rest of the unstructured data for equality.
if !avoidTimestampEqualities.DeepEqual(a.GetManagedFields(), b.GetManagedFields()) {
return false
}
return equalIgnoringValueAtPath(a.Object, b.Object, []string{"metadata", "managedFields"})
},
)
if err != nil {
@ -70,6 +79,36 @@ func getAvoidTimestampEqualities() conversion.Equalities {
return avoidTimestampEqualities
}
func equalIgnoringValueAtPath(a, b any, path []string) bool {
if len(path) == 0 { // found the value to ignore
return true
}
aMap, aOk := a.(map[string]any)
bMap, bOk := b.(map[string]any)
if !aOk || !bOk {
// Can't traverse into non-maps, ignore
return true
}
if len(aMap) != len(bMap) {
return false
}
pathHead := path[0]
for k, aVal := range aMap {
bVal, ok := bMap[k]
if !ok {
return false
}
if k == pathHead {
if !equalIgnoringValueAtPath(aVal, bVal, path[1:]) {
return false
}
} else if !avoidTimestampEqualities.DeepEqual(aVal, bVal) {
return false
}
}
return true
}
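
A simplified standalone sketch of the same traversal (using reflect.DeepEqual in place of the package's semantic equalities, so it only approximates the real behaviour):

package main

import (
	"fmt"
	"reflect"
)

// equalIgnoringPath compares nested maps while skipping the value at path.
func equalIgnoringPath(a, b any, path []string) bool {
	if len(path) == 0 {
		return true // reached the value to ignore
	}
	aMap, aOk := a.(map[string]any)
	bMap, bOk := b.(map[string]any)
	if !aOk || !bOk {
		return true // can't traverse into non-maps
	}
	if len(aMap) != len(bMap) {
		return false
	}
	for k, aVal := range aMap {
		bVal, ok := bMap[k]
		if !ok {
			return false
		}
		if k == path[0] {
			if !equalIgnoringPath(aVal, bVal, path[1:]) {
				return false
			}
		} else if !reflect.DeepEqual(aVal, bVal) {
			return false
		}
	}
	return true
}

func main() {
	a := map[string]any{"metadata": map[string]any{"name": "x", "managedFields": "old"}}
	b := map[string]any{"metadata": map[string]any{"name": "x", "managedFields": "new"}}
	fmt.Println(equalIgnoringPath(a, b, []string{"metadata", "managedFields"})) // true
}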
// IgnoreManagedFieldsTimestampsTransformer reverts timestamp updates
// if the non-managed parts of the object are equivalent
func IgnoreManagedFieldsTimestampsTransformer(
@ -152,14 +191,20 @@ func IgnoreManagedFieldsTimestampsTransformer(
return newObj, nil
}
eqFn := equalities.DeepEqual
if _, ok := newObj.(*unstructured.Unstructured); ok {
// Use strict equality with unstructured
eqFn = equalities.DeepEqualWithNilDifferentFromEmpty
}
// This condition ensures the managed fields are always compared first. If
// this check fails, the if statement will short circuit. If the check
// succeeds the slow path is taken which compares entire objects.
if !equalities.DeepEqualWithNilDifferentFromEmpty(oldManagedFields, newManagedFields) {
if !eqFn(oldManagedFields, newManagedFields) {
return newObj, nil
}
if equalities.DeepEqualWithNilDifferentFromEmpty(newObj, oldObj) {
if eqFn(newObj, oldObj) {
// Remove any changed timestamps, so that timestamp is not the only
// change seen by etcd.
//

View File

@ -41,7 +41,7 @@ import (
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/registry/rest"
genericfilters "k8s.io/apiserver/pkg/server/filters"
"k8s.io/apiserver/pkg/server/routine"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
@ -285,7 +285,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc
}
// Run watch serving in a separate goroutine to allow freeing current stack memory
t := genericfilters.TaskFrom(req.Context())
t := routine.TaskFrom(req.Context())
if t != nil {
t.Func = serve
} else {

View File

@ -23,8 +23,8 @@ import (
"strings"
"time"
jsonpatch "github.com/evanphx/json-patch"
"go.opentelemetry.io/otel/attribute"
jsonpatch "gopkg.in/evanphx/json-patch.v4"
kjson "sigs.k8s.io/json"
"k8s.io/apimachinery/pkg/api/errors"

View File

@ -27,6 +27,8 @@ import (
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
)
@ -62,6 +64,13 @@ type RequestInfo struct {
Name string
// Parts are the path parts for the request, always starting with /{resource}/{name}
Parts []string
// FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver
// honors field selectors for the verb this request is associated with.
FieldSelector string
// LabelSelector contains the unparsed label selector from a request. It is only present if the apiserver
// honors label selectors for the verb this request is associated with.
LabelSelector string
}
// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
@ -77,6 +86,9 @@ var specialVerbsNoSubresources = sets.NewString("proxy")
// this list allows the parser to distinguish between a namespace subresource, and a namespaced resource
var namespaceSubresources = sets.NewString("status", "finalize")
// verbsWithSelectors is the list of verbs which support fieldSelector and labelSelector parameters
var verbsWithSelectors = sets.NewString("list", "watch", "deletecollection")
// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift
var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...)
@ -151,6 +163,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
currentParts = currentParts[1:]
// handle input of form /{specialVerb}/*
verbViaPathPrefix := false
if specialVerbs.Has(currentParts[0]) {
if len(currentParts) < 2 {
return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
@ -158,6 +171,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
requestInfo.Verb = currentParts[0]
currentParts = currentParts[1:]
verbViaPathPrefix = true
} else {
switch req.Method {
@ -238,11 +252,28 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
}
}
}
// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
requestInfo.Verb = "deletecollection"
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
// Don't support selector authorization on requests that used the deprecated verb-via-path mechanism, since they don't support selectors consistently.
// There are multi-object and single-object watch endpoints, and only the multi-object one supports selectors.
if !verbViaPathPrefix && verbsWithSelectors.Has(requestInfo.Verb) {
// Interestingly, these are parsed above, but the current structure there means that if anything in the
// listOptions fails to decode, the field and label selectors are lost.
// Therefore, do the straight query param read here.
if vals := req.URL.Query()["fieldSelector"]; len(vals) > 0 {
requestInfo.FieldSelector = vals[0]
}
if vals := req.URL.Query()["labelSelector"]; len(vals) > 0 {
requestInfo.LabelSelector = vals[0]
}
}
}
return &requestInfo, nil
}
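A hedged sketch of what the new fields carry for a list request when the AuthorizeWithSelectors gate is enabled; the factory configuration and URL below are illustrative assumptions.
// Assumed imports: net/http, k8s.io/apimachinery/pkg/util/sets, k8s.io/apiserver/pkg/endpoints/request.
factory := &request.RequestInfoFactory{
	APIPrefixes:          sets.NewString("api", "apis"),
	GrouplessAPIPrefixes: sets.NewString("api"),
}
req, _ := http.NewRequest("GET",
	"/api/v1/namespaces/default/pods?labelSelector=app%3Dweb&fieldSelector=spec.nodeName%3Dnode1", nil)
info, _ := factory.NewRequestInfo(req)
// With the gate on: info.Verb == "list", info.LabelSelector == "app=web",
// info.FieldSelector == "spec.nodeName=node1". With it off, both selector fields stay empty.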

View File

@ -274,6 +274,7 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string
mutatingWebhookLatencyKey = "apiserver.latency.k8s.io/mutating-webhook"
validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook"
decodeLatencyKey = "apiserver.latency.k8s.io/decode-response-object"
apfQueueWaitLatencyKey = "apiserver.latency.k8s.io/apf-queue-wait"
)
tracker, ok := LatencyTrackersFrom(ctx)
@ -303,6 +304,8 @@ func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string
if latency := tracker.DecodeTracker.GetLatency(); latency != 0 {
annotations[decodeLatencyKey] = latency.String()
}
if latency := tracker.APFQueueWaitTracker.GetLatency(); latency != 0 {
annotations[apfQueueWaitLatencyKey] = latency.String()
}
return annotations
}

View File

@ -18,7 +18,6 @@ package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/featuregate"
)
@ -53,6 +52,13 @@ const (
// caching with ETags containing all APIResources known to the apiserver.
AggregatedDiscoveryEndpoint featuregate.Feature = "AggregatedDiscoveryEndpoint"
// owner: @vinayakankugoyal
// kep: https://kep.k8s.io/4633
// alpha: v1.31
//
// Allows us to enable anonymous auth for only certain apiserver endpoints.
AnonymousAuthConfigurableEndpoints featuregate.Feature = "AnonymousAuthConfigurableEndpoints"
// owner: @smarterclayton
// alpha: v1.8
// beta: v1.9
@ -62,16 +68,6 @@ const (
// all at once.
APIListChunking featuregate.Feature = "APIListChunking"
// owner: @MikeSpreitzer @yue9944882
// alpha: v1.18
// beta: v1.20
// stable: 1.29
//
// Enables managing request concurrency with prioritization and fairness at each server.
// The FeatureGate was introduced in release 1.15 but the feature
// was not really implemented before 1.18.
APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness"
// owner: @ilackams
// alpha: v1.7
// beta: v1.16
@ -98,6 +94,18 @@ const (
// Enables serving watch requests in separate goroutines.
APIServingWithRoutine featuregate.Feature = "APIServingWithRoutine"
// owner: @deads2k
// kep: https://kep.k8s.io/4601
// alpha: v1.31
//
// Allows authorization to use field and label selectors.
AuthorizeWithSelectors featuregate.Feature = "AuthorizeWithSelectors"
// owner: @serathius
// beta: v1.31
// Enables concurrent watch object decoding to avoid starving watch cache when conversion webhook is installed.
ConcurrentWatchObjectDecode featuregate.Feature = "ConcurrentWatchObjectDecode"
// owner: @cici37 @jpbetz
// kep: http://kep.k8s.io/3488
// alpha: v1.26
@ -108,14 +116,12 @@ const (
// Enables expression validation in Admission Control
ValidatingAdmissionPolicy featuregate.Feature = "ValidatingAdmissionPolicy"
// owner: @cici37
// kep: https://kep.k8s.io/2876
// alpha: v1.23
// beta: v1.25
// stable: v1.29
// owner: @jefftree
// kep: https://kep.k8s.io/4355
// alpha: v1.31
//
// Enables expression validation for Custom Resource
CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions"
// Enables coordinated leader election in the API server
CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection"
// alpha: v1.20
// beta: v1.21
@ -173,6 +179,13 @@ const (
// to a chunking list request.
RemainingItemCount featuregate.Feature = "RemainingItemCount"
// owner: @wojtek-t
// beta: v1.31
//
// Enables resilient watchcache initialization to avoid controlplane
// overload.
ResilientWatchCacheInitialization featuregate.Feature = "ResilientWatchCacheInitialization"
// owner: @serathius
// beta: v1.30
//
@ -276,6 +289,12 @@ const (
// Enables support for watch bookmark events.
WatchBookmark featuregate.Feature = "WatchBookmark"
// owner: @wojtek-t
// beta: v1.31
//
// Enables post-start-hook for storage readiness
WatchCacheInitializationPostStartHook featuregate.Feature = "WatchCacheInitializationPostStartHook"
// owner: @serathius
// beta: 1.30
// Enables watches without resourceVersion to be served from storage.
@ -298,6 +317,7 @@ const (
// owner: @serathius
// kep: http://kep.k8s.io/2340
// alpha: v1.28
// beta: v1.31
//
// Allow the API server to serve consistent lists from cache
ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache"
@ -314,6 +334,17 @@ const (
func init() {
runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
}
// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout Kubernetes binaries.
var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
// Example:
// EmulationVersion: {
// {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
// },
}
// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
@ -321,25 +352,29 @@ func init() {
// available throughout Kubernetes binaries.
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
AnonymousAuthConfigurableEndpoints: {Default: false, PreRelease: featuregate.Alpha},
AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
APIServerIdentity: {Default: true, PreRelease: featuregate.Beta},
APIServerTracing: {Default: true, PreRelease: featuregate.Beta},
APIServingWithRoutine: {Default: true, PreRelease: featuregate.Beta},
APIServingWithRoutine: {Default: false, PreRelease: featuregate.Alpha},
AuthorizeWithSelectors: {Default: false, PreRelease: featuregate.Alpha},
ConcurrentWatchObjectDecode: {Default: false, PreRelease: featuregate.Beta},
ValidatingAdmissionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
CoordinatedLeaderElection: {Default: false, PreRelease: featuregate.Alpha},
EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
@ -353,7 +388,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
RetryGenerateName: {Default: false, PreRelease: featuregate.Alpha},
ResilientWatchCacheInitialization: {Default: true, PreRelease: featuregate.Beta},
RetryGenerateName: {Default: true, PreRelease: featuregate.Beta},
SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},
@ -377,13 +414,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
WatchCacheInitializationPostStartHook: {Default: false, PreRelease: featuregate.Beta},
WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta},
InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha},
WatchList: {Default: false, PreRelease: featuregate.Alpha},
ConsistentListFromCache: {Default: false, PreRelease: featuregate.Alpha},
ConsistentListFromCache: {Default: true, PreRelease: featuregate.Beta},
ZeroLimitedNominalConcurrencyShares: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
}

View File

@ -19,6 +19,7 @@ package generic
import (
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/storagebackend"
@ -39,12 +40,15 @@ type RESTOptions struct {
}
// Implement RESTOptionsGetter so that RESTOptions can directly be used when available (i.e. tests)
func (opts RESTOptions) GetRESTOptions(schema.GroupResource) (RESTOptions, error) {
func (opts RESTOptions) GetRESTOptions(schema.GroupResource, runtime.Object) (RESTOptions, error) {
return opts, nil
}
type RESTOptionsGetter interface {
GetRESTOptions(resource schema.GroupResource) (RESTOptions, error)
// GetRESTOptions returns the RESTOptions for the given resource and example object.
// The example object is used to determine the storage version for the resource.
// If the example object is nil, the storage version will be determined by the resource's default storage version.
GetRESTOptions(resource schema.GroupResource, example runtime.Object) (RESTOptions, error)
}
// StoreOptions is set of configuration options used to complete generic registries.

View File

@ -226,6 +226,10 @@ type Store struct {
// storageVersionHash as empty in the discovery document.
StorageVersioner runtime.GroupVersioner
// ReadinessCheckFunc checks if the storage is ready for accepting requests.
// The field is optional; if set, it needs to be thread-safe.
ReadinessCheckFunc func() error
// DestroyFunc cleans up clients used by the underlying Storage; optional.
// If set, DestroyFunc has to be implemented in thread-safe way and
// be prepared for being called more than once.
@ -234,6 +238,7 @@ type Store struct {
// Note: the rest.StandardStorage interface aggregates the common REST verbs
var _ rest.StandardStorage = &Store{}
var _ rest.StorageWithReadiness = &Store{}
var _ rest.TableConvertor = &Store{}
var _ GenericStore = &Store{}
@ -292,6 +297,14 @@ func (e *Store) New() runtime.Object {
return e.NewFunc()
}
// ReadinessCheck checks if the storage is ready for accepting requests.
func (e *Store) ReadinessCheck() error {
if e.ReadinessCheckFunc != nil {
return e.ReadinessCheckFunc()
}
return nil
}
// Destroy cleans up its resources on shutdown.
func (e *Store) Destroy() {
if e.DestroyFunc != nil {
@ -1518,7 +1531,7 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {
return err
}
opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource)
opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource, e.NewFunc())
if err != nil {
return err
}
@ -1614,6 +1627,9 @@ func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {
}
}
}
if e.Storage.Storage != nil {
e.ReadinessCheckFunc = e.Storage.Storage.ReadinessCheck
}
return nil
}

View File

@ -52,6 +52,8 @@ import (
// Storage is a generic interface for RESTful storage services.
// Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected
// that objects may implement any of the below interfaces.
//
// Consider using StorageWithReadiness whenever possible.
type Storage interface {
// New returns an empty object that can be used with Create and Update after request data has been put into it.
// This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object)
@ -63,6 +65,14 @@ type Storage interface {
Destroy()
}
// StorageWithReadiness extends Storage interface with the readiness check.
type StorageWithReadiness interface {
Storage
// ReadinessCheck allows for checking storage readiness.
ReadinessCheck() error
}
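A hedged sketch of a storage type that satisfies the new interface; the type and its readiness wiring are hypothetical.
// Assumed imports: k8s.io/apimachinery/pkg/apis/meta/v1 (metav1), k8s.io/apimachinery/pkg/runtime.
type exampleStorage struct {
	ready func() error // e.g. wired to the underlying storage client's readiness probe
}

func (s *exampleStorage) New() runtime.Object { return &metav1.PartialObjectMetadata{} }
func (s *exampleStorage) Destroy()            {}
func (s *exampleStorage) ReadinessCheck() error {
	if s.ready != nil {
		return s.ready()
	}
	return nil
}

var _ StorageWithReadiness = &exampleStorage{} // compile-time check, assuming the same package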
// Scoper indicates what scope the resource is at. It must be specified.
// It is usually provided automatically based on your strategy.
type Scoper interface {

View File

@ -32,9 +32,9 @@ import (
"sync/atomic"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/google/uuid"
"golang.org/x/crypto/cryptobyte"
jsonpatch "gopkg.in/evanphx/json-patch.v4"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -42,8 +42,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/version"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authentication/authenticator"
@ -64,14 +64,17 @@ import (
genericfilters "k8s.io/apiserver/pkg/server/filters"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/routes"
"k8s.io/apiserver/pkg/server/routine"
serverstore "k8s.io/apiserver/pkg/server/storage"
storagevalue "k8s.io/apiserver/pkg/storage/value"
"k8s.io/apiserver/pkg/storageversion"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
utilversion "k8s.io/apiserver/pkg/util/version"
"k8s.io/client-go/informers"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs"
"k8s.io/component-base/metrics/features"
"k8s.io/component-base/metrics/prometheus/slis"
@ -147,8 +150,11 @@ type Config struct {
// The done values in the entries of this map are ignored.
PostStartHooks map[string]PostStartHookConfigEntry
// Version will enable the /version endpoint if non-nil
Version *version.Info
// EffectiveVersion determines which APIs and features are available
// based on the API/feature lifecycle.
EffectiveVersion utilversion.EffectiveVersion
// FeatureGate is a way to plumb feature gate through if you have them.
FeatureGate featuregate.FeatureGate
// AuditBackend is where audit events are sent to.
AuditBackend audit.Backend
// AuditPolicyRuleEvaluator makes the decision of whether and how to audit log a request.
@ -211,6 +217,10 @@ type Config struct {
// twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds.
MinRequestTimeout int
// StorageInitializationTimeout defines the maximum amount of time to wait for storage initialization
// before declaring apiserver ready.
StorageInitializationTimeout time.Duration
// This represents the maximum amount of time it should take for apiserver to complete its startup
// sequence and become healthy. From apiserver's start time to when this amount of time has
// elapsed, /livez will assume that unfinished post-start hooks will complete successfully and
@ -421,6 +431,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config {
MaxMutatingRequestsInFlight: 200,
RequestTimeout: time.Duration(60) * time.Second,
MinRequestTimeout: 1800,
StorageInitializationTimeout: time.Minute,
LivezGracePeriod: time.Duration(0),
ShutdownDelayDuration: time.Duration(0),
// 1.5MB is the default client request size in bytes
@ -585,7 +596,7 @@ func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) {
}
}
func completeOpenAPI(config *openapicommon.Config, version *version.Info) {
func completeOpenAPI(config *openapicommon.Config, version *version.Version) {
if config == nil {
return
}
@ -624,7 +635,7 @@ func completeOpenAPI(config *openapicommon.Config, version *version.Info) {
}
}
func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Info) {
func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Version) {
if config == nil {
return
}
@ -676,6 +687,9 @@ func (c *Config) ShutdownInitiatedNotify() <-chan struct{} {
// Complete fills in any fields not set that are required to have valid data and can be derived
// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver.
func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig {
if c.FeatureGate == nil {
c.FeatureGate = utilfeature.DefaultFeatureGate
}
if len(c.ExternalAddress) == 0 && c.PublicAddress != nil {
c.ExternalAddress = c.PublicAddress.String()
}
@ -691,9 +705,8 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo
}
c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port))
}
completeOpenAPI(c.OpenAPIConfig, c.Version)
completeOpenAPIV3(c.OpenAPIV3Config, c.Version)
completeOpenAPI(c.OpenAPIConfig, c.EffectiveVersion.EmulationVersion())
completeOpenAPIV3(c.OpenAPIV3Config, c.EffectiveVersion.EmulationVersion())
if c.DiscoveryAddresses == nil {
c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress}
@ -711,7 +724,7 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo
} else {
c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistryWithIdentity(func(groupResource schema.GroupResource) string {
// use the storage prefix as the key if possible
if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource); err == nil {
if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource, nil); err == nil {
return opts.ResourcePrefix
}
// otherwise return "" to use the default key (parent GV name)
@ -817,14 +830,16 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G
ShutdownSendRetryAfter: c.ShutdownSendRetryAfter,
APIServerID: c.APIServerID,
StorageReadinessHook: NewStorageReadinessHook(c.StorageInitializationTimeout),
StorageVersionManager: c.StorageVersionManager,
Version: c.Version,
EffectiveVersion: c.EffectiveVersion,
FeatureGate: c.FeatureGate,
muxAndDiscoveryCompleteSignals: map[string]<-chan struct{}{},
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
if c.FeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
manager := c.AggregatedDiscoveryGroupManager
if manager == nil {
manager = discoveryendpoint.NewResourceManager("apis")
@ -1039,15 +1054,15 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler {
handler = genericfilters.WithRetryAfter(handler, c.lifecycleSignals.NotAcceptingNewRequest.Signaled())
}
handler = genericfilters.WithHTTPLogging(handler)
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
if c.FeatureGate.Enabled(genericfeatures.APIServerTracing) {
handler = genericapifilters.WithTracing(handler, c.TracerProvider)
}
handler = genericapifilters.WithLatencyTrackers(handler)
// WithRoutine will execute future handlers in a separate goroutine and serving
// handler in current goroutine to minimize the stack memory usage. It must be
// after WithPanicRecover() to be protected from panics.
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServingWithRoutine) {
handler = genericfilters.WithRoutine(handler, c.LongRunningFunc)
if c.FeatureGate.Enabled(genericfeatures.APIServingWithRoutine) {
handler = routine.WithRoutine(handler, c.LongRunningFunc)
}
handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver)
handler = genericapifilters.WithRequestReceivedTimestamp(handler)
@ -1087,10 +1102,10 @@ func installAPI(s *GenericAPIServer, c *Config) {
}
}
routes.Version{Version: c.Version}.Install(s.Handler.GoRestfulContainer)
routes.Version{Version: c.EffectiveVersion.BinaryVersion().Info()}.Install(s.Handler.GoRestfulContainer)
if c.EnableDiscovery {
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
if c.FeatureGate.Enabled(genericfeatures.AggregatedDiscoveryEndpoint) {
wrapped := discoveryendpoint.WrapAggregatedDiscoveryToHandler(s.DiscoveryGroupManager, s.AggregatedDiscoveryGroupManager)
s.Handler.GoRestfulContainer.Add(wrapped.GenerateWebService("/apis", metav1.APIGroupList{}))
} else {

View File

@ -17,6 +17,7 @@ limitations under the License.
package server
import (
"fmt"
"os"
"strconv"
"strings"
@ -25,16 +26,15 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
apimachineryversion "k8s.io/apimachinery/pkg/version"
apimachineryversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/klog/v2"
)
// resourceExpirationEvaluator holds info for deciding if a particular rest.Storage needs to be excluded from the API
type resourceExpirationEvaluator struct {
currentMajor int
currentMinor int
isAlpha bool
currentVersion *apimachineryversion.Version
isAlpha bool
// This is usually set in testing to find which tests need to be removed. This prevents insta-failing CI.
// Set KUBE_APISERVER_STRICT_REMOVED_API_HANDLING_IN_ALPHA to see what will be removed when we tag beta
strictRemovedHandlingInAlpha bool
@ -53,30 +53,17 @@ type ResourceExpirationEvaluator interface {
ShouldServeForVersion(majorRemoved, minorRemoved int) bool
}
func NewResourceExpirationEvaluator(currentVersion apimachineryversion.Info) (ResourceExpirationEvaluator, error) {
func NewResourceExpirationEvaluator(currentVersion *apimachineryversion.Version) (ResourceExpirationEvaluator, error) {
if currentVersion == nil {
return nil, fmt.Errorf("empty NewResourceExpirationEvaluator currentVersion")
}
klog.V(1).Infof("NewResourceExpirationEvaluator with currentVersion: %s.", currentVersion)
ret := &resourceExpirationEvaluator{
strictRemovedHandlingInAlpha: false,
}
if len(currentVersion.Major) > 0 {
currentMajor64, err := strconv.ParseInt(currentVersion.Major, 10, 32)
if err != nil {
return nil, err
}
ret.currentMajor = int(currentMajor64)
}
if len(currentVersion.Minor) > 0 {
// split the "normal" + and - for semver stuff
minorString := strings.Split(currentVersion.Minor, "+")[0]
minorString = strings.Split(minorString, "-")[0]
minorString = strings.Split(minorString, ".")[0]
currentMinor64, err := strconv.ParseInt(minorString, 10, 32)
if err != nil {
return nil, err
}
ret.currentMinor = int(currentMinor64)
}
ret.isAlpha = strings.Contains(currentVersion.GitVersion, "alpha")
// Only keeps the major and minor versions from input version.
ret.currentVersion = apimachineryversion.MajorMinor(currentVersion.Major(), currentVersion.Minor())
ret.isAlpha = strings.Contains(currentVersion.PreRelease(), "alpha")
if envString, ok := os.LookupEnv("KUBE_APISERVER_STRICT_REMOVED_API_HANDLING_IN_ALPHA"); !ok {
// do nothing
@ -112,6 +99,15 @@ func (e *resourceExpirationEvaluator) shouldServe(gv schema.GroupVersion, versio
return false
}
introduced, ok := versionedPtr.(introducedInterface)
if ok {
majorIntroduced, minorIntroduced := introduced.APILifecycleIntroduced()
verIntroduced := apimachineryversion.MajorMinor(uint(majorIntroduced), uint(minorIntroduced))
if e.currentVersion.LessThan(verIntroduced) {
return false
}
}
removed, ok := versionedPtr.(removedInterface)
if !ok {
return true
@ -121,16 +117,11 @@ func (e *resourceExpirationEvaluator) shouldServe(gv schema.GroupVersion, versio
}
func (e *resourceExpirationEvaluator) ShouldServeForVersion(majorRemoved, minorRemoved int) bool {
if e.currentMajor < majorRemoved {
removedVer := apimachineryversion.MajorMinor(uint(majorRemoved), uint(minorRemoved))
if removedVer.GreaterThan(e.currentVersion) {
return true
}
if e.currentMajor > majorRemoved {
return false
}
if e.currentMinor < minorRemoved {
return true
}
if e.currentMinor > minorRemoved {
if removedVer.LessThan(e.currentVersion) {
return false
}
// at this point major and minor are equal, so this API should be removed when the current release GAs.
@ -152,6 +143,11 @@ type removedInterface interface {
APILifecycleRemoved() (major, minor int)
}
// Object interface generated from "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
type introducedInterface interface {
APILifecycleIntroduced() (major, minor int)
}
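A hedged example of the generated lifecycle methods the evaluator consults; the type name and versions are placeholders for what k8s:prerelease-lifecycle-gen normally emits.
type Widget struct{ metav1.TypeMeta } // hypothetical API type; a real one would also implement runtime.Object

// Introduced in 1.29 and removed in 1.33: roughly, it is served while currentVersion is in [1.29, 1.33),
// with the alpha/strict handling above deciding behavior at the removal release itself.
func (in *Widget) APILifecycleIntroduced() (major, minor int) { return 1, 29 }
func (in *Widget) APILifecycleRemoved() (major, minor int)    { return 1, 33 }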
// removeDeletedKinds inspects the storage map and modifies it in place by removing storage for kinds that have been deleted.
// versionedResourcesStorageMap mirrors the field on APIGroupInfo, it's a map from version to resource to the storage.
func (e *resourceExpirationEvaluator) RemoveDeletedKinds(groupName string, versioner runtime.ObjectVersioner, versionedResourcesStorageMap map[string]map[string]rest.Storage) {
@ -171,6 +167,8 @@ func (e *resourceExpirationEvaluator) RemoveDeletedKinds(groupName string, versi
}
klog.V(1).Infof("Removing resource %v.%v.%v because it is time to stop serving it per APILifecycle.", resourceName, apiVersion, groupName)
storage := versionToResource[resourceName]
storage.Destroy()
delete(versionToResource, resourceName)
}
versionedResourcesStorageMap[apiVersion] = versionToResource

View File

@ -54,7 +54,7 @@ type ConfigMapCAController struct {
listeners []Listener
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// preRunCaches are the caches to sync before starting the work of this control loop
preRunCaches []cache.InformerSynced
}
@ -94,7 +94,10 @@ func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, k
configmapLister: configmapLister,
configMapInformer: uncastConfigmapInformer,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)},
),
preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced},
}
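A minimal sketch of the generics-based workqueue API these controllers migrate to; the queue name and key are placeholders.
// Assumed import: k8s.io/client-go/util/workqueue.
func typedQueueSketch() {
	q := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "example-controller"},
	)
	q.Add("kube-system/extension-apiserver-authentication")
	key, shutdown := q.Get() // key is a string; no interface{} assertion needed anymore
	if shutdown {
		return
	}
	defer q.Done(key)
	q.Forget(key)
}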

View File

@ -60,7 +60,7 @@ type DynamicFileCAContent struct {
listeners []Listener
// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}
var _ Notifier = &DynamicFileCAContent{}
@ -82,7 +82,10 @@ func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAConten
ret := &DynamicFileCAContent{
name: name,
filename: filename,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)},
),
}
if err := ret.loadCABundle(); err != nil {
return nil, err

View File

@ -47,7 +47,7 @@ type DynamicCertKeyPairContent struct {
listeners []Listener
// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
}
var _ CertKeyContentProvider = &DynamicCertKeyPairContent{}
@ -64,7 +64,10 @@ func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*Dyna
name: name,
certFile: certFile,
keyFile: keyFile,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), fmt.Sprintf("DynamicCABundle-%s", purpose)),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)},
),
}
if err := ret.loadCertKeyPair(); err != nil {
return nil, err

View File

@ -56,7 +56,7 @@ type DynamicServingCertificateController struct {
currentServingTLSConfig atomic.Value
// queue only ever has one item, but it has nice error handling backoff/retry semantics
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
eventRecorder events.EventRecorder
}
@ -76,7 +76,10 @@ func NewDynamicServingCertificateController(
servingCert: servingCert,
sniCerts: sniCerts,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicServingCertificateController"),
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicServingCertificateController"},
),
eventRecorder: eventRecorder,
}

View File

@ -137,9 +137,7 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
metrics.RecordRequestPostTimeout(metrics.PostTimeoutSourceTimeoutHandler, status)
err := fmt.Errorf("post-timeout activity - time-elapsed: %s, %v %q result: %v",
time.Since(timedOutAt), r.Method, r.URL.Path, res)
utilruntime.HandleError(err)
utilruntime.HandleErrorWithContext(r.Context(), nil, "Post-timeout activity", "timeElapsed", time.Since(timedOutAt), "method", r.Method, "path", r.URL.Path, "result", res)
}()
}()
httplog.SetStacktracePredicate(r.Context(), func(status int) bool {

View File

@ -17,7 +17,6 @@ limitations under the License.
package filters
import (
"fmt"
"net/http"
"k8s.io/apimachinery/pkg/util/runtime"
@ -51,7 +50,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve
// This call can have different handlers, but the default chain rate limits. Call it after the metrics are updated
// in case the rate limit delays it. If you outrun the rate for this one timed out requests, something has gone
// seriously wrong with your server, but generally having a logging signal for timeouts is useful.
runtime.HandleError(fmt.Errorf("timeout or abort while handling: method=%v URI=%q audit-ID=%q", req.Method, req.RequestURI, audit.GetAuditIDTruncated(req.Context())))
runtime.HandleErrorWithContext(req.Context(), nil, "Timeout or abort while handling", "method", req.Method, "URI", req.RequestURI, "auditID", audit.GetAuditIDTruncated(req.Context()))
return
}
http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError)

View File

@ -38,8 +38,8 @@ import (
"k8s.io/apimachinery/pkg/util/managedfields"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/authorization/authorizer"
@ -51,8 +51,9 @@ import (
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/routes"
"k8s.io/apiserver/pkg/storageversion"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilversion "k8s.io/apiserver/pkg/util/version"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
openapibuilder3 "k8s.io/kube-openapi/pkg/builder3"
openapicommon "k8s.io/kube-openapi/pkg/common"
@ -232,11 +233,18 @@ type GenericAPIServer struct {
// APIServerID is the ID of this API server
APIServerID string
// StorageReadinessHook implements post-start-hook functionality for checking readiness
// of underlying storage for registered resources.
StorageReadinessHook *StorageReadinessHook
// StorageVersionManager holds the storage versions of the API resources installed by this server.
StorageVersionManager storageversion.Manager
// Version will enable the /version endpoint if non-nil
Version *version.Info
// EffectiveVersion determines which APIs and features are available
// based on the API/feature lifecycle.
EffectiveVersion utilversion.EffectiveVersion
// FeatureGate is a way to plumb feature gate through if you have them.
FeatureGate featuregate.FeatureGate
// lifecycleSignals provides access to the various signals that happen during the life cycle of the apiserver.
lifecycleSignals lifecycleSignals
@ -442,9 +450,19 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer {
// Run spawns the secure http server. It only returns if stopCh is closed
// or the secure port cannot be listened on initially.
// This is the diagram of what channels/signals are dependent on each other:
//
// | stopCh
// Deprecated: use RunWithContext instead. Run will not get removed to avoid
// breaking consumers, but should not be used in new code.
func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
ctx := wait.ContextForChannel(stopCh)
return s.RunWithContext(ctx)
}
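A hedged sketch of driving the server from a cancellable context instead of a stop channel; the signal wiring and the prepared variable are assumptions.
// Assumed imports: context, os/signal, syscall, k8s.io/klog/v2.
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
defer cancel()
if err := prepared.RunWithContext(ctx); err != nil { // prepared is a preparedGenericAPIServer
	klog.Fatal(err)
}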
// RunWithContext spawns the secure http server. It only returns if ctx is canceled
// or the secure port cannot be listened on initially.
// This is the diagram of what contexts/channels/signals are dependent on each other:
//
// | ctx
// | |
// | ---------------------------------------------------------
// | | |
@ -477,12 +495,13 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer {
// | | | |
// | |-------------------|---------------------|----------------------------------------|
// | | |
// | stopHttpServerCh (AuditBackend::Shutdown())
// | stopHttpServerCtx (AuditBackend::Shutdown())
// | |
// | listenerStoppedCh
// | |
// | HTTPServerStoppedListening (httpServerStoppedListeningCh)
func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
func (s preparedGenericAPIServer) RunWithContext(ctx context.Context) error {
stopCh := ctx.Done()
delayedStopCh := s.lifecycleSignals.AfterShutdownDelayDuration
shutdownInitiatedCh := s.lifecycleSignals.ShutdownInitiated
@ -544,9 +563,11 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
notAcceptingNewRequestCh := s.lifecycleSignals.NotAcceptingNewRequest
drainedCh := s.lifecycleSignals.InFlightRequestsDrained
stopHttpServerCh := make(chan struct{})
// Canceling the parent context does not immediately cancel the HTTP server.
// We only inherit context values here and deal with cancellation ourselves.
stopHTTPServerCtx, stopHTTPServer := context.WithCancelCause(context.WithoutCancel(ctx))
go func() {
defer close(stopHttpServerCh)
defer stopHTTPServer(errors.New("time to stop HTTP server"))
timeToStopHttpServerCh := notAcceptingNewRequestCh.Signaled()
if s.ShutdownSendRetryAfter {
@ -565,7 +586,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
}
}
stoppedCh, listenerStoppedCh, err := s.NonBlockingRun(stopHttpServerCh, shutdownTimeout)
stoppedCh, listenerStoppedCh, err := s.NonBlockingRunWithContext(stopHTTPServerCtx, shutdownTimeout)
if err != nil {
return err
}
@ -694,7 +715,18 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
// NonBlockingRun spawns the secure http server. An error is
// returned if the secure port cannot be listened on.
// The returned channel is closed when the (asynchronous) termination is finished.
//
// Deprecated: use NonBlockingRunWithContext instead. NonBlockingRun will not get removed to avoid
// breaking consumers, but should not be used in new code.
func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdownTimeout time.Duration) (<-chan struct{}, <-chan struct{}, error) {
ctx := wait.ContextForChannel(stopCh)
return s.NonBlockingRunWithContext(ctx, shutdownTimeout)
}
// NonBlockingRunWithContext spawns the secure http server. An error is
// returned if the secure port cannot be listened on.
// The returned channel is closed when the (asynchronous) termination is finished.
func (s preparedGenericAPIServer) NonBlockingRunWithContext(ctx context.Context, shutdownTimeout time.Duration) (<-chan struct{}, <-chan struct{}, error) {
// Use an internal stop channel to allow cleanup of the listeners on error.
internalStopCh := make(chan struct{})
var stoppedCh <-chan struct{}
@ -712,11 +744,11 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdow
// responsibility of the caller to close the provided channel to
// ensure cleanup.
go func() {
<-stopCh
<-ctx.Done()
close(internalStopCh)
}()
s.RunPostStartHooks(stopCh)
s.RunPostStartHooks(ctx)
if _, err := systemd.SdNotify(true, "READY=1\n"); err != nil {
klog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)
@ -751,7 +783,7 @@ func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *A
}
resourceInfos = append(resourceInfos, r...)
if utilfeature.DefaultFeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) {
if s.FeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) {
// Aggregated discovery only aggregates resources under /apis
if apiPrefix == APIGroupPrefix {
s.AggregatedDiscoveryGroupManager.AddGroupVersion(
@ -779,8 +811,8 @@ func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *A
s.RegisterDestroyFunc(apiGroupInfo.destroyStorage)
if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionAPI) &&
utilfeature.DefaultFeatureGate.Enabled(features.APIServerIdentity) {
if s.FeatureGate.Enabled(features.StorageVersionAPI) &&
s.FeatureGate.Enabled(features.APIServerIdentity) {
// API installation happens before we start listening on the handlers,
// therefore it is safe to register ResourceInfos here. The handler will block
// write requests until the storage versions of the targeting resources are updated.
@ -810,12 +842,13 @@ func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo
// Install the version handler.
// Add a handler at /<apiPrefix> to enumerate the supported api versions.
legacyRootAPIHandler := discovery.NewLegacyRootAPIHandler(s.discoveryAddresses, s.Serializer, apiPrefix)
if utilfeature.DefaultFeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) {
if s.FeatureGate.Enabled(features.AggregatedDiscoveryEndpoint) {
wrapped := discoveryendpoint.WrapAggregatedDiscoveryToHandler(legacyRootAPIHandler, s.AggregatedLegacyDiscoveryGroupManager)
s.Handler.GoRestfulContainer.Add(wrapped.GenerateWebService("/api", metav1.APIVersions{}))
} else {
s.Handler.GoRestfulContainer.Add(legacyRootAPIHandler.WebService())
}
s.registerStorageReadinessCheck("", apiGroupInfo)
return nil
}
@ -874,10 +907,28 @@ func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) erro
s.DiscoveryGroupManager.AddGroup(apiGroup)
s.Handler.GoRestfulContainer.Add(discovery.NewAPIGroupHandler(s.Serializer, apiGroup).WebService())
s.registerStorageReadinessCheck(apiGroupInfo.PrioritizedVersions[0].Group, apiGroupInfo)
}
return nil
}
// registerStorageReadinessCheck registers the readiness checks for all underlying storages
// for a given APIGroup.
func (s *GenericAPIServer) registerStorageReadinessCheck(groupName string, apiGroupInfo *APIGroupInfo) {
for version, storageMap := range apiGroupInfo.VersionedResourcesStorageMap {
for resource, storage := range storageMap {
if withReadiness, ok := storage.(rest.StorageWithReadiness); ok {
gvr := metav1.GroupVersionResource{
Group: groupName,
Version: version,
Resource: resource,
}
s.StorageReadinessHook.RegisterStorage(gvr, withReadiness)
}
}
}
}
// InstallAPIGroup exposes the given api group in the API.
// The <apiGroupInfo> passed into this function shouldn't be used elsewhere as the
// underlying storage will be destroyed on this servers shutdown.

View File

@ -118,7 +118,7 @@ func (s *GenericAPIServer) AddLivezChecks(delay time.Duration, checks ...healthz
// that we can register that the api-server is no longer ready while we attempt to gracefully
// shutdown.
func (s *GenericAPIServer) addReadyzShutdownCheck(stopCh <-chan struct{}) error {
return s.AddReadyzChecks(shutdownCheck{stopCh})
return s.AddReadyzChecks(healthz.NewShutdownHealthz(stopCh))
}
// installHealthz creates the healthz endpoint for this server
@ -139,25 +139,6 @@ func (s *GenericAPIServer) installLivez() {
s.livezRegistry.installHandler(s.Handler.NonGoRestfulMux)
}
// shutdownCheck fails if the embedded channel is closed. This is intended to allow for graceful shutdown sequences
// for the apiserver.
type shutdownCheck struct {
StopCh <-chan struct{}
}
func (shutdownCheck) Name() string {
return "shutdown"
}
func (c shutdownCheck) Check(req *http.Request) error {
select {
case <-c.StopCh:
return fmt.Errorf("process is shutting down")
default:
}
return nil
}
// delayedHealthCheck wraps a health check which will not fail until the explicitly defined delay has elapsed. This
// is intended for use primarily for livez health checks.
func delayedHealthCheck(check healthz.HealthChecker, clock clock.Clock, delay time.Duration) healthz.HealthChecker {

View File

@ -105,6 +105,29 @@ func (i *informerSync) Name() string {
return "informer-sync"
}
type shutdown struct {
stopCh <-chan struct{}
}
// NewShutdownHealthz returns a new HealthChecker that will fail if the embedded channel is closed.
// This is intended to allow for graceful shutdown sequences.
func NewShutdownHealthz(stopCh <-chan struct{}) HealthChecker {
return &shutdown{stopCh}
}
func (s *shutdown) Name() string {
return "shutdown"
}
func (s *shutdown) Check(req *http.Request) error {
select {
case <-s.stopCh:
return fmt.Errorf("process is shutting down")
default:
}
return nil
}
func (i *informerSync) Check(_ *http.Request) error {
stopCh := make(chan struct{})
// Close stopCh to force checking if informers are synced now.

View File

@ -17,6 +17,7 @@ limitations under the License.
package server
import (
"context"
"errors"
"fmt"
"net/http"
@ -48,8 +49,13 @@ type PreShutdownHookFunc func() error
type PostStartHookContext struct {
// LoopbackClientConfig is a config for a privileged loopback connection to the API server
LoopbackClientConfig *restclient.Config
// StopCh is the channel that will be closed when the server stops
// StopCh is the channel that will be closed when the server stops.
//
// Deprecated: use the PostStartHookContext itself instead, it contains a context that
// gets cancelled when the server stops. StopCh is still provided for existing code.
StopCh <-chan struct{}
// Context gets cancelled when the server stops.
context.Context
}
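A hedged sketch of a hook consuming the embedded context; the hook name and body are placeholders, and s is assumed to be the *GenericAPIServer.
s.AddPostStartHookOrDie("example-start-background-work", func(hookCtx PostStartHookContext) error {
	go func() {
		<-hookCtx.Done() // cancelled when the server stops; equivalent to <-hookCtx.StopCh
	}()
	return nil
})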
// PostStartHookProvider is an interface a component can implement to provide a post-start hook for the api server
@ -151,15 +157,16 @@ func (s *GenericAPIServer) AddPreShutdownHookOrDie(name string, hook PreShutdown
}
}
// RunPostStartHooks runs the PostStartHooks for the server
func (s *GenericAPIServer) RunPostStartHooks(stopCh <-chan struct{}) {
// RunPostStartHooks runs the PostStartHooks for the server.
func (s *GenericAPIServer) RunPostStartHooks(ctx context.Context) {
s.postStartHookLock.Lock()
defer s.postStartHookLock.Unlock()
s.postStartHooksCalled = true
context := PostStartHookContext{
LoopbackClientConfig: s.LoopbackClientConfig,
StopCh: stopCh,
StopCh: ctx.Done(),
Context: ctx,
}
for hookName, hookEntry := range s.postStartHooks {

View File

@ -31,6 +31,7 @@ import (
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/endpoints/responsewriter"
"k8s.io/apiserver/pkg/server/routine"
"k8s.io/klog/v2"
)
@ -125,10 +126,26 @@ func withLogging(handler http.Handler, stackTracePred StacktracePred, shouldLogR
rl := newLoggedWithStartTime(req, w, startTime)
rl.StacktraceWhen(stackTracePred)
req = req.WithContext(context.WithValue(ctx, respLoggerContextKey, rl))
defer rl.Log()
var logFunc func()
logFunc = rl.Log
defer func() {
if logFunc != nil {
logFunc()
}
}()
w = responsewriter.WrapForHTTP1Or2(rl)
handler.ServeHTTP(w, req)
// We need to ensure that the request is logged after it is processed.
// In case the request is executed in a separate goroutine created via the
// WithRoutine handler in the handler chain (i.e. handler.ServeHTTP() above
// would return before the request is fully responded to), we want the logging
// to happen in that goroutine too, so we append it to the task.
if routine.AppendTask(ctx, &routine.Task{Func: rl.Log}) {
logFunc = nil
}
})
}

View File

@ -96,9 +96,11 @@ func NewPathRecorderMux(name string) *PathRecorderMux {
// ListedPaths returns the registered handler exposedPaths.
func (m *PathRecorderMux) ListedPaths() []string {
m.lock.Lock()
handledPaths := append([]string{}, m.exposedPaths...)
sort.Strings(handledPaths)
m.lock.Unlock()
sort.Strings(handledPaths)
return handledPaths
}

View File

@ -59,7 +59,7 @@ type AdmissionOptions struct {
// RecommendedPluginOrder holds an ordered list of plugin names we recommend to use by default
RecommendedPluginOrder []string
// DefaultOffPlugins is a set of plugin names that is disabled by default
DefaultOffPlugins sets.String
DefaultOffPlugins sets.Set[string]
// EnablePlugins indicates plugins to be enabled passed through `--enable-admission-plugins`.
EnablePlugins []string
@ -91,7 +91,7 @@ func NewAdmissionOptions() *AdmissionOptions {
// after all the mutating ones, so their relative order in this list
// doesn't matter.
RecommendedPluginOrder: []string{lifecycle.PluginName, mutatingwebhook.PluginName, validatingadmissionpolicy.PluginName, validatingwebhook.PluginName},
DefaultOffPlugins: sets.NewString(),
DefaultOffPlugins: sets.Set[string]{},
}
server.RegisterAllAdmissionPlugins(options.Plugins)
return options
@ -139,6 +139,9 @@ func (a *AdmissionOptions) ApplyTo(
if informers == nil {
return fmt.Errorf("admission depends on a Kubernetes core API shared informer, it cannot be nil")
}
if kubeClient == nil || dynamicClient == nil {
return fmt.Errorf("admission depends on a Kubernetes core API client, it cannot be nil")
}
pluginNames := a.enabledPluginNames()
@ -223,7 +226,7 @@ func (a *AdmissionOptions) Validate() []error {
// EnablePlugins, DisablePlugins fields
// to prepare a list of ordered plugin names that are enabled.
func (a *AdmissionOptions) enabledPluginNames() []string {
allOffPlugins := append(a.DefaultOffPlugins.List(), a.DisablePlugins...)
allOffPlugins := append(sets.List[string](a.DefaultOffPlugins), a.DisablePlugins...)
disabledPlugins := sets.NewString(allOffPlugins...)
enabledPlugins := sets.NewString(a.EnablePlugins...)
disabledPlugins = disabledPlugins.Difference(enabledPlugins)
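A minimal sketch of the generic sets API that DefaultOffPlugins moves to; the plugin names are placeholders.
// Assumed import: k8s.io/apimachinery/pkg/util/sets.
defaultOff := sets.New[string]("AlwaysDeny")
defaultOff.Insert("ImagePolicyWebhook")
for _, name := range sets.List(defaultOff) { // sorted []string, replaces sets.String.List()
	_ = name
}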

View File

@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/apis/apiserver"
"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
"k8s.io/apiserver/pkg/authentication/request/headerrequest"
"k8s.io/apiserver/pkg/server"
@ -222,8 +223,8 @@ type DelegatingAuthenticationOptions struct {
// CustomRoundTripperFn allows for specifying a middleware function for custom HTTP behaviour for the authentication webhook client.
CustomRoundTripperFn transport.WrapperFunc
// DisableAnonymous gives user an option to disable Anonymous authentication.
DisableAnonymous bool
// Anonymous gives user an option to enable/disable Anonymous authentication.
Anonymous *apiserver.AnonymousAuthConfig
}
func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions {
@ -238,6 +239,7 @@ func NewDelegatingAuthenticationOptions() *DelegatingAuthenticationOptions {
},
WebhookRetryBackoff: DefaultAuthWebhookRetryBackoff(),
TokenRequestTimeout: 10 * time.Second,
Anonymous: &apiserver.AnonymousAuthConfig{Enabled: true},
}
}
@ -305,7 +307,7 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.Aut
}
cfg := authenticatorfactory.DelegatingAuthenticatorConfig{
Anonymous: !s.DisableAnonymous,
Anonymous: &apiserver.AnonymousAuthConfig{Enabled: true},
CacheTTL: s.CacheTTL,
WebhookRetryBackoff: s.WebhookRetryBackoff,
TokenAccessReviewTimeout: s.TokenRequestTimeout,

View File

@ -1,126 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"net"
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/server"
)
// DeprecatedInsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port.
// No one should be using these anymore.
// DEPRECATED: all insecure serving options are removed in a future version
type DeprecatedInsecureServingOptions struct {
BindAddress net.IP
BindPort int
// BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp",
// "tcp4", and "tcp6".
BindNetwork string
// Listener is the secure server network listener.
// either Listener or BindAddress/BindPort/BindNetwork is set,
// if Listener is set, use it and omit BindAddress/BindPort/BindNetwork.
Listener net.Listener
// ListenFunc can be overridden to create a custom listener, e.g. for mocking in tests.
// It defaults to options.CreateListener.
ListenFunc func(network, addr string, config net.ListenConfig) (net.Listener, int, error)
}
// Validate ensures that the insecure port values within the range of the port.
func (s *DeprecatedInsecureServingOptions) Validate() []error {
if s == nil {
return nil
}
errors := []error{}
if s.BindPort < 0 || s.BindPort > 65535 {
errors = append(errors, fmt.Errorf("insecure port %v must be between 0 and 65535, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort))
}
return errors
}
// AddFlags adds flags related to insecure serving to the specified FlagSet.
func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
if s == nil {
return
}
fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+
"The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening on all interfaces and IP address families).")
// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784
fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.")
fs.Lookup("insecure-bind-address").Hidden = false
fs.IntVar(&s.BindPort, "insecure-port", s.BindPort, ""+
"The port on which to serve unsecured, unauthenticated access.")
// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784
fs.MarkDeprecated("insecure-port", "This flag will be removed in a future version.")
fs.Lookup("insecure-port").Hidden = false
}
// AddUnqualifiedFlags adds flags related to insecure serving without the --insecure prefix to the specified FlagSet.
func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet) {
if s == nil {
return
}
fs.IPVar(&s.BindAddress, "address", s.BindAddress,
"The IP address on which to serve the insecure --port (set to '0.0.0.0' or '::' for listening on all interfaces and IP address families).")
fs.MarkDeprecated("address", "see --bind-address instead.")
fs.Lookup("address").Hidden = false
fs.IntVar(&s.BindPort, "port", s.BindPort, "The port on which to serve unsecured, unauthenticated access. Set to 0 to disable.")
fs.MarkDeprecated("port", "see --secure-port instead.")
fs.Lookup("port").Hidden = false
}
// ApplyTo adds DeprecatedInsecureServingOptions to the insecureserverinfo and kube-controller manager configuration.
// Note: the double pointer allows to set the *DeprecatedInsecureServingInfo to nil without referencing the struct hosting this pointer.
func (s *DeprecatedInsecureServingOptions) ApplyTo(c **server.DeprecatedInsecureServingInfo) error {
if s == nil {
return nil
}
if s.BindPort <= 0 {
return nil
}
if s.Listener == nil {
var err error
listen := CreateListener
if s.ListenFunc != nil {
listen = s.ListenFunc
}
addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort))
s.Listener, s.BindPort, err = listen(s.BindNetwork, addr, net.ListenConfig{})
if err != nil {
return fmt.Errorf("failed to create listener: %v", err)
}
}
*c = &server.DeprecatedInsecureServingInfo{
Listener: s.Listener,
}
return nil
}

View File

@ -130,7 +130,7 @@ func init() {
configScheme := runtime.NewScheme()
utilruntime.Must(apiserver.AddToScheme(configScheme))
utilruntime.Must(apiserverv1.AddToScheme(configScheme))
codecs = serializer.NewCodecFactory(configScheme)
codecs = serializer.NewCodecFactory(configScheme, serializer.EnableStrict)
envelopemetrics.RegisterMetrics()
storagevalue.RegisterMetrics()
metrics.RegisterMetrics()

View File

@ -49,7 +49,7 @@ type DynamicEncryptionConfigContent struct {
lastLoadedEncryptionConfigHash string
// queue for processing changes in encryption config file.
queue workqueue.RateLimitingInterface
queue workqueue.TypedRateLimitingInterface[string]
// dynamicTransformers updates the transformers when encryption config file changes.
dynamicTransformers *encryptionconfig.DynamicTransformers
@ -78,8 +78,11 @@ func NewDynamicEncryptionConfiguration(
filePath: filePath,
lastLoadedEncryptionConfigHash: configContentHash,
dynamicTransformers: dynamicTransformers,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
apiServerID: apiServerID,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: name},
),
apiServerID: apiServerID,
getEncryptionConfigHash: func(_ context.Context, filepath string) (string, error) {
return encryptionconfig.GetEncryptionConfigHash(filepath)
},
@ -150,7 +153,7 @@ func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.C
return true
}
func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey interface{}) {
func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey string) {
var (
updatedEffectiveConfig bool
err error
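A self-contained sketch (assuming a client-go version that ships the generic workqueue used in the hunk above; the queue name is illustrative) of what the move from the untyped RateLimitingInterface to TypedRateLimitingInterface[string] buys callers: Get returns a string directly, so the interface{} type assertion that processWorkItem previously needed disappears.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Construction mirrors the hunk above.
	q := workqueue.NewTypedRateLimitingQueueWithConfig(
		workqueue.DefaultTypedControllerRateLimiter[string](),
		workqueue.TypedRateLimitingQueueConfig[string]{Name: "encryption-config"},
	)
	q.Add("key")

	item, shutdown := q.Get() // item is already a string, no type assertion needed
	if shutdown {
		return
	}
	defer q.Done(item)
	fmt.Println("processing", item)
}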

View File

@ -383,8 +383,8 @@ type StorageFactoryRestOptionsFactory struct {
StorageFactory serverstorage.StorageFactory
}
func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource) (generic.RESTOptions, error) {
storageConfig, err := f.StorageFactory.NewConfig(resource)
func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupResource, example runtime.Object) (generic.RESTOptions, error) {
storageConfig, err := f.StorageFactory.NewConfig(resource, example)
if err != nil {
return generic.RESTOptions{}, fmt.Errorf("unable to find storage destination for %v, due to %v", resource, err.Error())
}
@ -399,6 +399,10 @@ func (f *StorageFactoryRestOptionsFactory) GetRESTOptions(resource schema.GroupR
StorageObjectCountTracker: f.Options.StorageConfig.StorageObjectCountTracker,
}
if ret.StorageObjectCountTracker == nil {
ret.StorageObjectCountTracker = storageConfig.StorageObjectCountTracker
}
if f.Options.EnableWatchCache {
sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes)
if err != nil {
@ -465,7 +469,7 @@ type SimpleStorageFactory struct {
StorageConfig storagebackend.Config
}
func (s *SimpleStorageFactory) NewConfig(resource schema.GroupResource) (*storagebackend.ConfigForResource, error) {
func (s *SimpleStorageFactory) NewConfig(resource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) {
return s.StorageConfig.ForResource(resource), nil
}
@ -489,8 +493,8 @@ type transformerStorageFactory struct {
resourceTransformers storagevalue.ResourceTransformers
}
func (t *transformerStorageFactory) NewConfig(resource schema.GroupResource) (*storagebackend.ConfigForResource, error) {
config, err := t.delegate.NewConfig(resource)
func (t *transformerStorageFactory) NewConfig(resource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) {
config, err := t.delegate.NewConfig(resource, example)
if err != nil {
return nil, err
}
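An illustrative call-site sketch (not from this patch; the package name, function name, and choice of appsv1.Deployment are assumptions) for the widened NewConfig signature: callers now pass an example object of the resource so the factory can pick a backward-compatible storage version.

// storageconfig_sketch.go — illustrative only; "factory" is any serverstorage.StorageFactory.
package sketch

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	serverstorage "k8s.io/apiserver/pkg/server/storage"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

func deploymentStorageConfig(factory serverstorage.StorageFactory) (*storagebackend.ConfigForResource, error) {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	// The example object lets the factory consult prerelease-lifecycle data when an
	// emulation version is in effect; passing nil keeps the binary default version.
	return factory.NewConfig(gr, &appsv1.Deployment{})
}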

View File

@ -71,6 +71,9 @@ func (o *FeatureOptions) ApplyTo(c *server.Config, clientset kubernetes.Interfac
c.EnableContentionProfiling = o.EnableContentionProfiling
if o.EnablePriorityAndFairness {
if clientset == nil {
return fmt.Errorf("invalid configuration: priority and fairness requires a core Kubernetes client")
}
if c.MaxRequestsInFlight+c.MaxMutatingRequestsInFlight <= 0 {
return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight)

View File

@ -120,9 +120,18 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error {
if err := o.CoreAPI.ApplyTo(config); err != nil {
return err
}
kubeClient, err := kubernetes.NewForConfig(config.ClientConfig)
if err != nil {
return err
var kubeClient *kubernetes.Clientset
var dynamicClient *dynamic.DynamicClient
if config.ClientConfig != nil {
var err error
kubeClient, err = kubernetes.NewForConfig(config.ClientConfig)
if err != nil {
return err
}
dynamicClient, err = dynamic.NewForConfig(config.ClientConfig)
if err != nil {
return err
}
}
if err := o.Features.ApplyTo(&config.Config, kubeClient, config.SharedInformerFactory); err != nil {
return err
@ -131,10 +140,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error {
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(config.ClientConfig)
if err != nil {
return err
}
if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, kubeClient, dynamicClient, o.FeatureGate,
initializers...); err != nil {
return err

View File

@ -25,8 +25,10 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/server"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilversion "k8s.io/apiserver/pkg/util/version"
"github.com/spf13/pflag"
)
@ -45,16 +47,17 @@ const (
type ServerRunOptions struct {
AdvertiseAddress net.IP
CorsAllowedOriginList []string
HSTSDirectives []string
ExternalHost string
MaxRequestsInFlight int
MaxMutatingRequestsInFlight int
RequestTimeout time.Duration
GoawayChance float64
LivezGracePeriod time.Duration
MinRequestTimeout int
ShutdownDelayDuration time.Duration
CorsAllowedOriginList []string
HSTSDirectives []string
ExternalHost string
MaxRequestsInFlight int
MaxMutatingRequestsInFlight int
RequestTimeout time.Duration
GoawayChance float64
LivezGracePeriod time.Duration
MinRequestTimeout int
StorageInitializationTimeout time.Duration
ShutdownDelayDuration time.Duration
// We intentionally did not add a flag for this option. Users of the
// apiserver library can wire it to a flag.
JSONPatchMaxCopyBytes int64
@ -89,9 +92,24 @@ type ServerRunOptions struct {
// This grace period is orthogonal to other grace periods, and
// it is not overridden by any other grace period.
ShutdownWatchTerminationGracePeriod time.Duration
// ComponentGlobalsRegistry is the registry where the effective versions and feature gates for all components are stored.
ComponentGlobalsRegistry utilversion.ComponentGlobalsRegistry
// ComponentName is the name under which the server's global variables are registered in the ComponentGlobalsRegistry.
ComponentName string
}
func NewServerRunOptions() *ServerRunOptions {
if utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent) == nil {
featureGate := utilfeature.DefaultMutableFeatureGate
effectiveVersion := utilversion.DefaultKubeEffectiveVersion()
utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(utilversion.DefaultKubeComponent, effectiveVersion, featureGate))
}
return NewServerRunOptionsForComponent(utilversion.DefaultKubeComponent, utilversion.DefaultComponentGlobalsRegistry)
}
func NewServerRunOptionsForComponent(componentName string, componentGlobalsRegistry utilversion.ComponentGlobalsRegistry) *ServerRunOptions {
defaults := server.NewConfig(serializer.CodecFactory{})
return &ServerRunOptions{
MaxRequestsInFlight: defaults.MaxRequestsInFlight,
@ -99,16 +117,22 @@ func NewServerRunOptions() *ServerRunOptions {
RequestTimeout: defaults.RequestTimeout,
LivezGracePeriod: defaults.LivezGracePeriod,
MinRequestTimeout: defaults.MinRequestTimeout,
StorageInitializationTimeout: defaults.StorageInitializationTimeout,
ShutdownDelayDuration: defaults.ShutdownDelayDuration,
ShutdownWatchTerminationGracePeriod: defaults.ShutdownWatchTerminationGracePeriod,
JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes,
MaxRequestBodyBytes: defaults.MaxRequestBodyBytes,
ShutdownSendRetryAfter: false,
ComponentName: componentName,
ComponentGlobalsRegistry: componentGlobalsRegistry,
}
}
// ApplyTo applies the run options to the method receiver and returns self
func (s *ServerRunOptions) ApplyTo(c *server.Config) error {
if err := s.ComponentGlobalsRegistry.SetFallback(); err != nil {
return err
}
c.CorsAllowedOriginList = s.CorsAllowedOriginList
c.HSTSDirectives = s.HSTSDirectives
c.ExternalAddress = s.ExternalHost
@ -118,12 +142,15 @@ func (s *ServerRunOptions) ApplyTo(c *server.Config) error {
c.RequestTimeout = s.RequestTimeout
c.GoawayChance = s.GoawayChance
c.MinRequestTimeout = s.MinRequestTimeout
c.StorageInitializationTimeout = s.StorageInitializationTimeout
c.ShutdownDelayDuration = s.ShutdownDelayDuration
c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes
c.MaxRequestBodyBytes = s.MaxRequestBodyBytes
c.PublicAddress = s.AdvertiseAddress
c.ShutdownSendRetryAfter = s.ShutdownSendRetryAfter
c.ShutdownWatchTerminationGracePeriod = s.ShutdownWatchTerminationGracePeriod
c.EffectiveVersion = s.ComponentGlobalsRegistry.EffectiveVersionFor(s.ComponentName)
c.FeatureGate = s.ComponentGlobalsRegistry.FeatureGateFor(s.ComponentName)
return nil
}
@ -173,6 +200,10 @@ func (s *ServerRunOptions) Validate() []error {
errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value"))
}
if s.StorageInitializationTimeout < 0 {
errors = append(errors, fmt.Errorf("--storage-initialization-timeout can not be negative value"))
}
if s.ShutdownDelayDuration < 0 {
errors = append(errors, fmt.Errorf("--shutdown-delay-duration can not be negative value"))
}
@ -196,6 +227,9 @@ func (s *ServerRunOptions) Validate() []error {
if err := validateCorsAllowedOriginList(s.CorsAllowedOriginList); err != nil {
errors = append(errors, err)
}
if errs := s.ComponentGlobalsRegistry.Validate(); len(errs) != 0 {
errors = append(errors, errs...)
}
return errors
}
@ -323,6 +357,9 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) {
"handler, which picks a randomized value above this number as the connection timeout, "+
"to spread out load.")
fs.DurationVar(&s.StorageInitializationTimeout, "storage-initialization-timeout", s.StorageInitializationTimeout,
"Maximum amount of time to wait for storage initialization before declaring apiserver ready. Defaults to 1m.")
fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+
"Time to delay the termination. During that time the server keeps serving requests normally. The endpoints /healthz and /livez "+
"will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+
@ -337,5 +374,10 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) {
"This option, if set, represents the maximum amount of grace period the apiserver will wait "+
"for active watch request(s) to drain during the graceful server shutdown window.")
utilfeature.DefaultMutableFeatureGate.AddFlag(fs)
s.ComponentGlobalsRegistry.AddFlags(fs)
}
// Complete fills missing fields with defaults.
func (s *ServerRunOptions) Complete() error {
return s.ComponentGlobalsRegistry.SetFallback()
}
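A minimal flag-wiring sketch (flag set name and flag values are illustrative; it only uses calls visible in this hunk) of the intended lifecycle with the new ComponentGlobalsRegistry: construct the options, register flags, parse, then call Complete so the registry resolves effective versions before Validate and ApplyTo run.

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	genericoptions "k8s.io/apiserver/pkg/server/options"
)

func main() {
	opts := genericoptions.NewServerRunOptions() // lazily registers the kube component, as above
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	opts.AddUniversalFlags(fs) // also surfaces the registry's version/feature-gate flags

	if err := fs.Parse([]string{"--storage-initialization-timeout=2m"}); err != nil {
		panic(err)
	}
	if err := opts.Complete(); err != nil { // lets the registry fill in fallback versions
		panic(err)
	}
	if errs := opts.Validate(); len(errs) > 0 {
		panic(fmt.Sprint(errs))
	}
	fmt.Println("storage init timeout:", opts.StorageInitializationTimeout)
}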

View File

@ -44,6 +44,8 @@ type SecureServingOptions struct {
// BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp",
// "tcp4", and "tcp6".
BindNetwork string
// DisableHTTP2Serving indicates that http2 serving should not be enabled.
DisableHTTP2Serving bool
// Required set to true means that BindPort cannot be zero.
Required bool
// ExternalAddress is the address advertised, even if BindAddress is a loopback. By default this
@ -163,6 +165,9 @@ func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) {
}
fs.IntVar(&s.BindPort, "secure-port", s.BindPort, desc)
fs.BoolVar(&s.DisableHTTP2Serving, "disable-http2-serving", s.DisableHTTP2Serving,
"If true, HTTP2 serving will be disabled [default=false]")
fs.StringVar(&s.ServerCert.CertDirectory, "cert-dir", s.ServerCert.CertDirectory, ""+
"The directory where the TLS certs are located. "+
"If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.")
@ -256,6 +261,7 @@ func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error
*config = &server.SecureServingInfo{
Listener: s.Listener,
HTTP2MaxStreamsPerConnection: s.HTTP2MaxStreamsPerConnection,
DisableHTTP2: s.DisableHTTP2Serving,
}
c := *config

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package filters
package routine
import (
"context"
@ -35,6 +35,20 @@ func WithTask(parent context.Context, t *Task) context.Context {
return request.WithValue(parent, taskKey, t)
}
// AppendTask appends a task to be executed after the existing task in the context completes.
// It is a no-op and returns false if the context carries no existing task.
func AppendTask(ctx context.Context, t *Task) bool {
if existTask := TaskFrom(ctx); existTask != nil && existTask.Func != nil {
existFunc := existTask.Func
existTask.Func = func() {
existFunc()
t.Func()
}
return true
}
return false
}
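A small usage sketch (assuming the renamed package lives at k8s.io/apiserver/pkg/server/routine and is imported as routine; the print statements are illustrative) of how AppendTask chains extra work onto a task already attached to the request context.

package main

import (
	"context"
	"fmt"

	"k8s.io/apiserver/pkg/server/routine"
)

func main() {
	// A request context that already carries a task (normally attached by the routine filter).
	ctx := routine.WithTask(context.Background(), &routine.Task{Func: func() { fmt.Println("original task") }})

	// Appending succeeds because an existing task with a non-nil Func is present.
	ok := routine.AppendTask(ctx, &routine.Task{Func: func() { fmt.Println("appended task") }})
	fmt.Println("appended:", ok)

	// When the filter eventually runs the task, both funcs execute in order.
	routine.TaskFrom(ctx).Func() // prints "original task", then "appended task"
}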
func TaskFrom(ctx context.Context) *Task {
t, _ := ctx.Value(taskKey).(*Task)
return t

View File

@ -21,6 +21,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
apimachineryversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/util/version"
)
type ResourceEncodingConfig interface {
@ -33,10 +35,15 @@ type ResourceEncodingConfig interface {
InMemoryEncodingFor(schema.GroupResource) (schema.GroupVersion, error)
}
type CompatibilityResourceEncodingConfig interface {
BackwardCompatibileStorageEncodingFor(schema.GroupResource, runtime.Object) (schema.GroupVersion, error)
}
type DefaultResourceEncodingConfig struct {
// resources records the overriding encoding configs for individual resources.
resources map[schema.GroupResource]*OverridingResourceEncoding
scheme *runtime.Scheme
resources map[schema.GroupResource]*OverridingResourceEncoding
scheme *runtime.Scheme
effectiveVersion version.EffectiveVersion
}
type OverridingResourceEncoding struct {
@ -47,7 +54,7 @@ type OverridingResourceEncoding struct {
var _ ResourceEncodingConfig = &DefaultResourceEncodingConfig{}
func NewDefaultResourceEncodingConfig(scheme *runtime.Scheme) *DefaultResourceEncodingConfig {
return &DefaultResourceEncodingConfig{resources: map[schema.GroupResource]*OverridingResourceEncoding{}, scheme: scheme}
return &DefaultResourceEncodingConfig{resources: map[schema.GroupResource]*OverridingResourceEncoding{}, scheme: scheme, effectiveVersion: version.DefaultKubeEffectiveVersion()}
}
func (o *DefaultResourceEncodingConfig) SetResourceEncoding(resourceBeingStored schema.GroupResource, externalEncodingVersion, internalVersion schema.GroupVersion) {
@ -57,6 +64,10 @@ func (o *DefaultResourceEncodingConfig) SetResourceEncoding(resourceBeingStored
}
}
func (o *DefaultResourceEncodingConfig) SetEffectiveVersion(effectiveVersion version.EffectiveVersion) {
o.effectiveVersion = effectiveVersion
}
func (o *DefaultResourceEncodingConfig) StorageEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) {
if !o.scheme.IsGroupRegistered(resource.Group) {
return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group)
@ -71,6 +82,24 @@ func (o *DefaultResourceEncodingConfig) StorageEncodingFor(resource schema.Group
return o.scheme.PrioritizedVersionsForGroup(resource.Group)[0], nil
}
func (o *DefaultResourceEncodingConfig) BackwardCompatibileStorageEncodingFor(resource schema.GroupResource, example runtime.Object) (schema.GroupVersion, error) {
if !o.scheme.IsGroupRegistered(resource.Group) {
return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group)
}
// Always respect overrides
resourceOverride, resourceExists := o.resources[resource]
if resourceExists {
return resourceOverride.ExternalResourceEncoding, nil
}
return emulatedStorageVersion(
o.scheme.PrioritizedVersionsForGroup(resource.Group)[0],
example,
o.effectiveVersion,
o.scheme)
}
func (o *DefaultResourceEncodingConfig) InMemoryEncodingFor(resource schema.GroupResource) (schema.GroupVersion, error) {
if !o.scheme.IsGroupRegistered(resource.Group) {
return schema.GroupVersion{}, fmt.Errorf("group %q is not registered in scheme", resource.Group)
@ -82,3 +111,79 @@ func (o *DefaultResourceEncodingConfig) InMemoryEncodingFor(resource schema.Grou
}
return schema.GroupVersion{Group: resource.Group, Version: runtime.APIVersionInternal}, nil
}
// Object interface generated from "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
type introducedInterface interface {
APILifecycleIntroduced() (major, minor int)
}
func emulatedStorageVersion(binaryVersionOfResource schema.GroupVersion, example runtime.Object, effectiveVersion version.EffectiveVersion, scheme *runtime.Scheme) (schema.GroupVersion, error) {
if example == nil || effectiveVersion == nil {
return binaryVersionOfResource, nil
}
// Look up example in scheme to find all objects of the same Group-Kind
// Use the highest priority version for that group-kind whose lifecycle window
// includes the current emulation version.
// If no version is found, use the binary version
// (in this case the API should be disabled anyway)
gvks, _, err := scheme.ObjectKinds(example)
if err != nil {
return schema.GroupVersion{}, err
} else if len(gvks) == 0 {
// Shouldn't happen when err is nil
return schema.GroupVersion{}, fmt.Errorf("object %T has no GVKs registered in scheme", example)
}
// VersionsForGroupKind returns versions in priority order
versions := scheme.VersionsForGroupKind(schema.GroupKind{Group: gvks[0].Group, Kind: gvks[0].Kind})
compatibilityVersion := effectiveVersion.MinCompatibilityVersion()
for _, gv := range versions {
if gv.Version == runtime.APIVersionInternal {
continue
}
gvk := schema.GroupVersionKind{
Group: gv.Group,
Version: gv.Version,
Kind: gvks[0].Kind,
}
exampleOfGVK, err := scheme.New(gvk)
if err != nil {
return schema.GroupVersion{}, err
}
// If it was introduced after current compatibility version, don't use it
// skip the introduced check for test when currentVersion is 0.0 to test all apis
if introduced, hasIntroduced := exampleOfGVK.(introducedInterface); hasIntroduced && (compatibilityVersion.Major() > 0 || compatibilityVersion.Minor() > 0) {
// API resource lifecycles should be relative to k8s api version
majorIntroduced, minorIntroduced := introduced.APILifecycleIntroduced()
introducedVer := apimachineryversion.MajorMinor(uint(majorIntroduced), uint(minorIntroduced))
if introducedVer.GreaterThan(compatibilityVersion) {
continue
}
}
// versions is returned in priority order, so just use first result
return gvk.GroupVersion(), nil
}
// Getting here means we're serving a version that is unknown to the
// min-compatibility-version server.
//
// This is only expected to happen when serving an alpha API type due
// to missing pre-release lifecycle information
// (which doesn't happen by default), or when emulation-version and
// min-compatibility-version are several versions apart so a beta or GA API
// was being served which didn't exist at all in min-compatibility-version.
//
// In the alpha case - we do not support compatibility versioning of
// alpha types and recommend users do not mix the two.
// In the skip-level case - The version of apiserver we are retaining
// compatibility with has no knowledge of the type,
// so storing it in another type is no issue.
return binaryVersionOfResource, nil
}
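A compact, in-package illustration (hypothetical version numbers) of the lifecycle-window check at the heart of emulatedStorageVersion: a version introduced after the minimum compatibility version is skipped in favor of an older one in the priority list.

// Hedged sketch of the introduced-version comparison used above.
func exampleLifecycleWindowCheck() {
	compatibilityVersion := apimachineryversion.MajorMinor(1, 30) // e.g. the server's min-compatibility-version
	majorIntroduced, minorIntroduced := 1, 31                     // hypothetical APILifecycleIntroduced() of v1

	introducedVer := apimachineryversion.MajorMinor(uint(majorIntroduced), uint(minorIntroduced))
	if introducedVer.GreaterThan(compatibilityVersion) {
		// v1 did not exist at 1.30, so the loop above falls through to the next
		// (older) version in priority order, e.g. v1beta1.
	}
}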

View File

@ -42,7 +42,7 @@ type Backend struct {
type StorageFactory interface {
// New finds the storage destination for the given group and resource. It will
// return an error if the group has no storage destination configured.
NewConfig(groupResource schema.GroupResource) (*storagebackend.ConfigForResource, error)
NewConfig(groupResource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error)
// ResourcePrefix returns the overridden resource prefix for the GroupResource
// This allows for cohabitation of resources with different native types and provides
@ -226,7 +226,7 @@ func (s *DefaultStorageFactory) getStorageGroupResource(groupResource schema.Gro
// New finds the storage destination for the given group and resource. It will
// return an error if the group has no storage destination configured.
func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*storagebackend.ConfigForResource, error) {
func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource, example runtime.Object) (*storagebackend.ConfigForResource, error) {
chosenStorageResource := s.getStorageGroupResource(groupResource)
// operate on copy
@ -244,14 +244,23 @@ func (s *DefaultStorageFactory) NewConfig(groupResource schema.GroupResource) (*
}
var err error
codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource)
if err != nil {
return nil, err
if backwardCompatibleInterface, ok := s.ResourceEncodingConfig.(CompatibilityResourceEncodingConfig); ok {
codecConfig.StorageVersion, err = backwardCompatibleInterface.BackwardCompatibileStorageEncodingFor(groupResource, example)
if err != nil {
return nil, err
}
} else {
codecConfig.StorageVersion, err = s.ResourceEncodingConfig.StorageEncodingFor(chosenStorageResource)
if err != nil {
return nil, err
}
}
codecConfig.MemoryVersion, err = s.ResourceEncodingConfig.InMemoryEncodingFor(groupResource)
if err != nil {
return nil, err
}
codecConfig.Config = storageConfig
storageConfig.Codec, storageConfig.EncodeVersioner, err = s.newStorageCodecFn(codecConfig)

View File

@ -0,0 +1,91 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"errors"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/klog/v2"
)
// StorageReadinessHook implements PostStartHook functionality for checking readiness
// of underlying storage for registered resources.
type StorageReadinessHook struct {
timeout time.Duration
lock sync.Mutex
checks map[string]func() error
}
// NewStorageReadinessHook creates a new StorageReadinessHook.
func NewStorageReadinessHook(timeout time.Duration) *StorageReadinessHook {
return &StorageReadinessHook{
checks: make(map[string]func() error),
timeout: timeout,
}
}
func (h *StorageReadinessHook) RegisterStorage(gvr metav1.GroupVersionResource, storage rest.StorageWithReadiness) {
h.lock.Lock()
defer h.lock.Unlock()
if _, ok := h.checks[gvr.String()]; !ok {
h.checks[gvr.String()] = storage.ReadinessCheck
} else {
klog.Errorf("Registering storage readiness hook for %s again: ", gvr.String())
}
}
func (h *StorageReadinessHook) check() bool {
h.lock.Lock()
defer h.lock.Unlock()
failedChecks := []string{}
for gvr, check := range h.checks {
if err := check(); err != nil {
failedChecks = append(failedChecks, gvr)
}
}
if len(failedChecks) == 0 {
klog.Infof("Storage is ready for all registered resources")
return true
}
klog.V(4).Infof("Storage is not ready for: %v", failedChecks)
return false
}
func (h *StorageReadinessHook) Hook(ctx PostStartHookContext) error {
deadlineCtx, cancel := context.WithTimeout(ctx, h.timeout)
defer cancel()
err := wait.PollUntilContextCancel(deadlineCtx, 100*time.Millisecond, true,
func(_ context.Context) (bool, error) {
if ok := h.check(); ok {
return true, nil
}
return false, nil
})
if errors.Is(err, context.DeadlineExceeded) {
klog.Warningf("Deadline exceeded while waiting for storage readiness... ignoring")
}
return nil
}
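A wiring sketch (the genericServer variable, hook name, and a deploymentStorage value implementing rest.StorageWithReadiness are assumptions) of how the hook is meant to be used: register each resource's storage, then attach the hook as a post-start hook.

// In-package style sketch; "genericServer" and "deploymentStorage" are hypothetical.
func registerStorageReadiness(genericServer *GenericAPIServer, deploymentStorage rest.StorageWithReadiness) error {
	hook := NewStorageReadinessHook(30 * time.Second)
	hook.RegisterStorage(
		metav1.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
		deploymentStorage,
	)
	// Hook satisfies the PostStartHookFunc signature, so it can be attached directly.
	return genericServer.AddPostStartHook("storage-readiness-sketch", hook.Hook)
}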

View File

@ -21,6 +21,7 @@ import (
"fmt"
"net/http"
"reflect"
"strings"
"sync"
"time"
@ -42,6 +43,7 @@ import (
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/cacher/metrics"
etcdfeature "k8s.io/apiserver/pkg/storage/feature"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
"k8s.io/component-base/tracing"
@ -51,7 +53,8 @@ import (
)
var (
emptyFunc = func(bool) {}
emptyFunc = func(bool) {}
coreNamespaceResource = schema.GroupResource{Group: "", Resource: "namespaces"}
)
const (
@ -531,9 +534,18 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
return nil, err
}
readyGeneration, err := c.ready.waitAndReadGeneration(ctx)
if err != nil {
return nil, errors.NewServiceUnavailable(err.Error())
var readyGeneration int
if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) {
var ok bool
readyGeneration, ok = c.ready.checkAndReadGeneration()
if !ok {
return nil, errors.NewTooManyRequests("storage is (re)initializing", 1)
}
} else {
readyGeneration, err = c.ready.waitAndReadGeneration(ctx)
if err != nil {
return nil, errors.NewServiceUnavailable(err.Error())
}
}
// determine the namespace and name scope of the watch, first from the request, secondarily from the field selector
@ -549,6 +561,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
scope.name = selectorName
}
// for request like '/api/v1/watch/namespaces/*', set scope.namespace to empty.
// namespaces don't populate metadata.namespace in ObjFields.
if c.groupResource == coreNamespaceResource && len(scope.namespace) > 0 && scope.namespace == scope.name {
scope.namespace = ""
}
triggerValue, triggerSupported := "", false
if c.indexedTrigger != nil {
for _, field := range pred.IndexFields {
@ -568,6 +586,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
// watchers on our watcher having a processing hiccup
chanSize := c.watchCache.suggestedWatchChannelSize(c.indexedTrigger != nil, triggerSupported)
// client-go is going to fall back to a standard LIST on any error
// returned for watch-list requests
if isListWatchRequest(opts) && !etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) {
return newErrWatcher(fmt.Errorf("a watch stream was requested by the client but the required storage feature %s is disabled", storage.RequestWatchProgress)), nil
}
// Determine the ResourceVersion to which the watch cache must be synchronized
requiredResourceVersion, err := c.getWatchCacheResourceVersion(ctx, requestedWatchRV, opts)
if err != nil {
@ -591,7 +615,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
// to compute watcher.forget function (which has to happen under lock).
watcher := newCacheWatcher(
chanSize,
filterWithAttrsFunction(key, pred),
filterWithAttrsAndPrefixFunction(key, pred),
emptyFunc,
c.versioner,
deadline,
@ -621,7 +645,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
defer c.watchCache.RUnlock()
var cacheInterval *watchCacheInterval
cacheInterval, err = c.watchCache.getAllEventsSinceLocked(requiredResourceVersion, opts)
cacheInterval, err = c.watchCache.getAllEventsSinceLocked(requiredResourceVersion, key, opts)
if err != nil {
// To match the uncached watch implementation, once we have passed authn/authz/admission,
// and successfully parsed a resource version, other errors must fail with a watch event of type ERROR,
@ -675,6 +699,14 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
return c.storage.Get(ctx, key, opts, objPtr)
}
if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) {
if !c.ready.check() {
// If Cache is not initialized, delegate Get requests to storage
// as described in https://kep.k8s.io/4568
return c.storage.Get(ctx, key, opts, objPtr)
}
}
// If resourceVersion is specified, serve it from cache.
// It's guaranteed that the returned value is at least that
// fresh as the given resourceVersion.
@ -683,16 +715,18 @@ func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, o
return err
}
if getRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.Get(ctx, key, opts, objPtr)
}
// Do not create a trace - it's not for free and there are tons
// of Get requests. We can add it if it will be really needed.
if err := c.ready.wait(ctx); err != nil {
return errors.NewServiceUnavailable(err.Error())
if !utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) {
if getRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.Get(ctx, key, opts, objPtr)
}
if err := c.ready.wait(ctx); err != nil {
return errors.NewServiceUnavailable(err.Error())
}
}
objVal, err := conversion.EnforcePtr(objPtr)
@ -728,17 +762,40 @@ func shouldDelegateList(opts storage.ListOptions) bool {
pred := opts.Predicate
match := opts.ResourceVersionMatch
consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache)
requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress)
// Serve consistent reads from storage if ConsistentListFromCache is disabled
consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled
consistentReadFromStorage := resourceVersion == "" && !(consistentListFromCacheEnabled && requestWatchProgressSupported)
// Watch cache doesn't support continuations, so serve them from etcd.
hasContinuation := len(pred.Continue) > 0
// Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd.
hasLimit := pred.Limit > 0 && resourceVersion != "0"
// Watch cache only supports ResourceVersionMatchNotOlderThan (default).
unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan
// see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list
isLegacyExactMatch := opts.Predicate.Limit > 0 && match == "" && len(resourceVersion) > 0 && resourceVersion != "0"
unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan || isLegacyExactMatch
return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
return consistentReadFromStorage || hasContinuation || unsupportedMatch
}
// computeListLimit determines whether the cacher should
// apply a limit to an incoming LIST request and returns its value.
//
// note that this function doesn't check RVM nor the Continuation token.
// these parameters are validated by the shouldDelegateList function.
//
// as of today, the limit is ignored for requests that set RV == 0
func computeListLimit(opts storage.ListOptions) int64 {
if opts.Predicate.Limit <= 0 || opts.ResourceVersion == "0" {
return 0
}
return opts.Predicate.Limit
}
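A test-style sketch (it would live in a _test.go beside this file; the test name is illustrative) pinning down computeListLimit's two cases: the limit is ignored for resource version "0" lists and honored otherwise.

func TestComputeListLimitSketch(t *testing.T) {
	// RV "0" means "any recent state"; the cache serves it whole, so no limit applies.
	if got := computeListLimit(storage.ListOptions{ResourceVersion: "0", Predicate: storage.SelectionPredicate{Limit: 500}}); got != 0 {
		t.Fatalf("want 0, got %d", got)
	}
	// For other resource versions the predicate's limit is applied during selection.
	if got := computeListLimit(storage.ListOptions{ResourceVersion: "", Predicate: storage.SelectionPredicate{Limit: 500}}); got != 500 {
		t.Fatalf("want 500, got %d", got)
	}
}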
func shouldDelegateListOnNotReadyCache(opts storage.ListOptions) bool {
pred := opts.Predicate
noLabelSelector := pred.Label == nil || pred.Label.Empty()
noFieldSelector := pred.Field == nil || pred.Field.Empty()
hasLimit := pred.Limit > 0
return noLabelSelector && noFieldSelector && hasLimit
}
func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred storage.SelectionPredicate, recursive bool) ([]interface{}, uint64, string, error) {
@ -752,7 +809,7 @@ func (c *Cacher) listItems(ctx context.Context, listRV uint64, key string, pred
}
return nil, readResourceVersion, "", nil
}
return c.watchCache.WaitUntilFreshAndList(ctx, listRV, pred.MatcherIndex(ctx))
return c.watchCache.WaitUntilFreshAndList(ctx, listRV, key, pred.MatcherIndex(ctx))
}
// GetList implements storage.Interface
@ -768,12 +825,31 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
if err != nil {
return err
}
if listRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.GetList(ctx, key, opts, listObj)
if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) {
if !c.ready.check() && shouldDelegateListOnNotReadyCache(opts) {
// If Cacher is not initialized, delegate List requests to storage
// as described in https://kep.k8s.io/4568
return c.storage.GetList(ctx, key, opts, listObj)
}
} else {
if listRV == 0 && !c.ready.check() {
// If Cacher is not yet initialized and we don't require any specific
// minimal resource version, simply forward the request to storage.
return c.storage.GetList(ctx, key, opts, listObj)
}
}
if resourceVersion == "" && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) {
// For recursive lists, we need to make sure the key ended with "/" so that we only
// get children "directories". e.g. if we have key "/a", "/a/b", "/ab", getting keys
// with prefix "/a" will return all three, while with prefix "/a/" will return only
// "/a/b" which is the correct answer.
preparedKey := key
if opts.Recursive && !strings.HasSuffix(key, "/") {
preparedKey += "/"
}
requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress)
consistentRead := resourceVersion == "" && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && requestWatchProgressSupported
if consistentRead {
listRV, err = storage.GetCurrentResourceVersionFromStorage(ctx, c.storage, c.newListFunc, c.resourcePrefix, c.objectType.String())
if err != nil {
return err
@ -785,8 +861,16 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
attribute.Stringer("type", c.groupResource))
defer span.End(500 * time.Millisecond)
if err := c.ready.wait(ctx); err != nil {
return errors.NewServiceUnavailable(err.Error())
if utilfeature.DefaultFeatureGate.Enabled(features.ResilientWatchCacheInitialization) {
if !c.ready.check() {
// If Cacher is not initialized, reject List requests
// as described in https://kep.k8s.io/4568
return errors.NewTooManyRequests("storage is (re)initializing", 1)
}
} else {
if err := c.ready.wait(ctx); err != nil {
return errors.NewServiceUnavailable(err.Error())
}
}
span.AddEvent("Ready")
@ -802,25 +886,47 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
if listVal.Kind() != reflect.Slice {
return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
}
filter := filterWithAttrsFunction(key, pred)
objs, readResourceVersion, indexUsed, err := c.listItems(ctx, listRV, key, pred, recursive)
objs, readResourceVersion, indexUsed, err := c.listItems(ctx, listRV, preparedKey, pred, recursive)
success := "true"
fallback := "false"
if err != nil {
if consistentRead {
if storage.IsTooLargeResourceVersion(err) {
fallback = "true"
err = c.storage.GetList(ctx, key, opts, listObj)
}
if err != nil {
success = "false"
}
metrics.ConsistentReadTotal.WithLabelValues(c.resourcePrefix, success, fallback).Add(1)
}
return err
}
if consistentRead {
metrics.ConsistentReadTotal.WithLabelValues(c.resourcePrefix, success, fallback).Add(1)
}
span.AddEvent("Listed items from cache", attribute.Int("count", len(objs)))
// Store pointers to the eligible objects instead of filling listObj directly.
// The elements of listObj are struct values, so building that slice eagerly
// would cost excessive memory; we therefore delay materializing it for as
// long as possible.
var selectedObjects []runtime.Object
for _, obj := range objs {
var lastSelectedObjectKey string
var hasMoreListItems bool
limit := computeListLimit(opts)
for i, obj := range objs {
elem, ok := obj.(*storeElement)
if !ok {
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
}
if filter(elem.Key, elem.Labels, elem.Fields) {
if pred.MatchesObjectAttributes(elem.Labels, elem.Fields) {
selectedObjects = append(selectedObjects, elem.Object)
lastSelectedObjectKey = elem.Key
}
if limit > 0 && int64(len(selectedObjects)) >= limit {
hasMoreListItems = i < len(objs)-1
break
}
}
if len(selectedObjects) == 0 {
@ -836,7 +942,12 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio
}
span.AddEvent("Filtered items", attribute.Int("count", listVal.Len()))
if c.versioner != nil {
if err := c.versioner.UpdateList(listObj, readResourceVersion, "", nil); err != nil {
continueValue, remainingItemCount, err := storage.PrepareContinueToken(lastSelectedObjectKey, key, int64(readResourceVersion), int64(len(objs)), hasMoreListItems, opts)
if err != nil {
return err
}
if err = c.versioner.UpdateList(listObj, readResourceVersion, continueValue, remainingItemCount); err != nil {
return err
}
}
@ -867,6 +978,14 @@ func (c *Cacher) Count(pathPrefix string) (int64, error) {
return c.storage.Count(pathPrefix)
}
// ReadinessCheck implements storage.Interface.
func (c *Cacher) ReadinessCheck() error {
if !c.ready.check() {
return storage.ErrStorageNotReady
}
return nil
}
// baseObjectThreadUnsafe omits locking for cachingObject.
func baseObjectThreadUnsafe(object runtime.Object) runtime.Object {
if co, ok := object.(*cachingObject); ok {
@ -905,7 +1024,23 @@ func (c *Cacher) dispatchEvents() {
bookmarkTimer := c.clock.NewTimer(wait.Jitter(time.Second, 0.25))
defer bookmarkTimer.Stop()
// The internal informer populates the RV as soon as it conducts
// the first successful sync with the underlying store.
// The cache must wait until this first sync is completed to be deemed ready.
// Since we cannot send a bookmark when the lastProcessedResourceVersion is 0,
// we poll aggressively for the first list RV before entering the dispatch loop.
lastProcessedResourceVersion := uint64(0)
if err := wait.PollUntilContextCancel(wait.ContextForChannel(c.stopCh), 10*time.Millisecond, true, func(_ context.Context) (bool, error) {
if rv := c.watchCache.getListResourceVersion(); rv != 0 {
lastProcessedResourceVersion = rv
return true, nil
}
return false, nil
}); err != nil {
// given the function above never returns error,
// the non-empty error means that the stopCh was closed
return
}
for {
select {
case event, ok := <-c.incoming:
@ -929,29 +1064,6 @@ func (c *Cacher) dispatchEvents() {
metrics.EventsCounter.WithLabelValues(c.groupResource.String()).Inc()
case <-bookmarkTimer.C():
bookmarkTimer.Reset(wait.Jitter(time.Second, 0.25))
// Never send a bookmark event if we did not see an event here, this is fine
// because we don't provide any guarantees on sending bookmarks.
//
// Just pop closed watchers and requeue others if needed.
//
// TODO(#115478): rework the following logic
// in a way that would allow more
// efficient cleanup of closed watchers
if lastProcessedResourceVersion == 0 {
func() {
c.Lock()
defer c.Unlock()
for _, watchers := range c.bookmarkWatchers.popExpiredWatchersThreadUnsafe() {
for _, watcher := range watchers {
if watcher.stopped {
continue
}
c.bookmarkWatchers.addWatcherThreadUnsafe(watcher)
}
}
}()
continue
}
bookmarkEvent := &watchCacheEvent{
Type: watch.Bookmark,
Object: c.newFunc(),
@ -1231,7 +1343,7 @@ func forgetWatcher(c *Cacher, w *cacheWatcher, index int, scope namespacedName,
}
}
func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc {
func filterWithAttrsAndPrefixFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc {
filterFunc := func(objKey string, label labels.Set, field fields.Set) bool {
if !hasPathPrefix(objKey, key) {
return false
@ -1256,7 +1368,7 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
//
// The returned function must be called under the watchCache lock.
func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(parsedResourceVersion, requiredResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) {
if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks {
if !isListWatchRequest(opts) {
return func() uint64 { return 0 }, nil
}
@ -1271,6 +1383,10 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(parsedResourceVersion
}
}
func isListWatchRequest(opts storage.ListOptions) bool {
return opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks
}
// getWatchCacheResourceVersion returns a ResourceVersion to which the watch cache must be synchronized to
//
// Depending on the input parameters, the semantics of the returned ResourceVersion are:
@ -1300,9 +1416,14 @@ func (c *Cacher) getWatchCacheResourceVersion(ctx context.Context, parsedWatchRe
// can wait for events without unnecessary blocking.
func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) error {
if opts.SendInitialEvents != nil && *opts.SendInitialEvents {
// TODO(p0lyn0mial): adapt the following logic once
// https://github.com/kubernetes/kubernetes/pull/123264 merges
if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && c.watchCache.notFresh(requestedWatchRV) {
// Here be dragons:
// Since the etcd feature checker needs to check all members
// to determine whether a given feature is supported,
// we may receive a positive response even if the feature is not supported.
//
// In this very rare scenario, the worst case will be that this
// request will wait for 3 seconds before it fails.
if etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) && c.watchCache.notFresh(requestedWatchRV) {
c.watchCache.waitingUntilFresh.Add()
defer c.watchCache.waitingUntilFresh.Remove()
}
@ -1313,6 +1434,11 @@ func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context,
return nil
}
// Wait blocks until the cacher is Ready or Stopped, it returns an error if Stopped.
func (c *Cacher) Wait(ctx context.Context) error {
return c.ready.wait(ctx)
}
// errWatcher implements watch.Interface to return a single error
type errWatcher struct {
result chan watch.Event

View File

@ -106,6 +106,17 @@ var (
[]string{"resource"},
)
watchCacheResourceVersion = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "resource_version",
Help: "Current resource version of watch cache broken down by resource type.",
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"resource"},
)
watchCacheCapacityIncreaseTotal = compbasemetrics.NewCounterVec(
&compbasemetrics.CounterOpts{
Subsystem: subsystem,
@ -156,6 +167,15 @@ var (
StabilityLevel: compbasemetrics.ALPHA,
Buckets: []float64{0.005, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 1.25, 1.5, 2, 3},
}, []string{"resource"})
ConsistentReadTotal = compbasemetrics.NewCounterVec(
&compbasemetrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "consistent_read_total",
Help: "Counter for consistent reads from cache.",
StabilityLevel: compbasemetrics.ALPHA,
}, []string{"resource", "success", "fallback"})
)
var registerMetrics sync.Once
@ -171,11 +191,13 @@ func Register() {
legacyregistry.MustRegister(EventsReceivedCounter)
legacyregistry.MustRegister(EventsCounter)
legacyregistry.MustRegister(TerminatedWatchersCounter)
legacyregistry.MustRegister(watchCacheResourceVersion)
legacyregistry.MustRegister(watchCacheCapacityIncreaseTotal)
legacyregistry.MustRegister(watchCacheCapacityDecreaseTotal)
legacyregistry.MustRegister(WatchCacheCapacity)
legacyregistry.MustRegister(WatchCacheInitializations)
legacyregistry.MustRegister(WatchCacheReadWait)
legacyregistry.MustRegister(ConsistentReadTotal)
})
}
@ -186,6 +208,11 @@ func RecordListCacheMetrics(resourcePrefix, indexName string, numFetched, numRet
listCacheNumReturned.WithLabelValues(resourcePrefix).Add(float64(numReturned))
}
// RecordResourceVersion sets the current resource version for a given resource type.
func RecordResourceVersion(resourcePrefix string, resourceVersion uint64) {
watchCacheResourceVersion.WithLabelValues(resourcePrefix).Set(float64(resourceVersion))
}
// RecordsWatchCacheCapacityChange record watchCache capacity resize(increase or decrease) operations.
func RecordsWatchCacheCapacityChange(objType string, old, new int) {
WatchCacheCapacity.WithLabelValues(objType).Set(float64(new))

View File

@ -33,6 +33,7 @@ import (
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/cacher/metrics"
etcdfeature "k8s.io/apiserver/pkg/storage/feature"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/cache"
"k8s.io/component-base/tracing"
@ -311,25 +312,26 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd
RecordTime: w.clock.Now(),
}
// We can call w.store.Get() outside of a critical section,
// because the w.store itself is thread-safe and the only
// place where w.store is modified is below (via updateFunc)
// and these calls are serialized because reflector is processing
// events one-by-one.
previous, exists, err := w.store.Get(elem)
if err != nil {
return err
}
if exists {
previousElem := previous.(*storeElement)
wcEvent.PrevObject = previousElem.Object
wcEvent.PrevObjLabels = previousElem.Labels
wcEvent.PrevObjFields = previousElem.Fields
}
if err := func() error {
// TODO: We should consider moving this lock below after the watchCacheEvent
// is created. In such situation, the only problematic scenario is Replace()
// happening after getting object from store and before acquiring a lock.
// Maybe introduce another lock for this purpose.
w.Lock()
defer w.Unlock()
previous, exists, err := w.store.Get(elem)
if err != nil {
return err
}
if exists {
previousElem := previous.(*storeElement)
wcEvent.PrevObject = previousElem.Object
wcEvent.PrevObjLabels = previousElem.Labels
wcEvent.PrevObjFields = previousElem.Fields
}
w.updateCache(wcEvent)
w.resourceVersion = resourceVersion
defer w.cond.Broadcast()
@ -346,6 +348,7 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd
if w.eventHandler != nil {
w.eventHandler(wcEvent)
}
metrics.RecordResourceVersion(w.groupResource.String(), resourceVersion)
return nil
}
@ -428,6 +431,7 @@ func (w *watchCache) UpdateResourceVersion(resourceVersion string) {
}
w.eventHandler(wcEvent)
}
metrics.RecordResourceVersion(w.groupResource.String(), rv)
}
// List returns list of pointers to <storeElement> objects.
@ -497,8 +501,31 @@ func (s sortableStoreElements) Swap(i, j int) {
// WaitUntilFreshAndList returns list of pointers to `storeElement` objects along
// with their ResourceVersion and the name of the index, if any, that was used.
func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) {
if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) {
func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) {
items, rv, index, err := w.waitUntilFreshAndListItems(ctx, resourceVersion, key, matchValues)
if err != nil {
return nil, 0, "", err
}
var result []interface{}
for _, item := range items {
elem, ok := item.(*storeElement)
if !ok {
return nil, 0, "", fmt.Errorf("non *storeElement returned from storage: %v", item)
}
if !hasPathPrefix(elem.Key, key) {
continue
}
result = append(result, item)
}
sort.Sort(sortableStoreElements(result))
return result, rv, index, nil
}
func (w *watchCache) waitUntilFreshAndListItems(ctx context.Context, resourceVersion uint64, key string, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) {
requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress)
if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && requestWatchProgressSupported && w.notFresh(resourceVersion) {
w.waitingUntilFresh.Add()
err = w.waitUntilFreshAndBlock(ctx, resourceVersion)
w.waitingUntilFresh.Remove()
@ -506,7 +533,6 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion
err = w.waitUntilFreshAndBlock(ctx, resourceVersion)
}
defer func() { sort.Sort(sortableStoreElements(result)) }()
defer w.RUnlock()
if err != nil {
return result, rv, index, err
@ -626,7 +652,9 @@ func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error {
w.onReplace()
}
w.cond.Broadcast()
klog.V(3).Infof("Replace watchCache (rev: %v) ", resourceVersion)
metrics.RecordResourceVersion(w.groupResource.String(), version)
klog.V(3).Infof("Replaced watchCache (rev: %v) ", resourceVersion)
return nil
}
@ -641,6 +669,12 @@ func (w *watchCache) Resync() error {
return nil
}
func (w *watchCache) getListResourceVersion() uint64 {
w.RLock()
defer w.RUnlock()
return w.listResourceVersion
}
func (w *watchCache) currentCapacity() int {
w.RLock()
defer w.RUnlock()
@ -703,9 +737,10 @@ func (w *watchCache) isIndexValidLocked(index int) bool {
// getAllEventsSinceLocked returns a watchCacheInterval that can be used to
// retrieve events since a certain resourceVersion. This function assumes to
// be called under the watchCache lock.
func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, opts storage.ListOptions) (*watchCacheInterval, error) {
func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, key string, opts storage.ListOptions) (*watchCacheInterval, error) {
_, matchesSingle := opts.Predicate.MatchesSingle()
if opts.SendInitialEvents != nil && *opts.SendInitialEvents {
return w.getIntervalFromStoreLocked()
return w.getIntervalFromStoreLocked(key, matchesSingle)
}
size := w.endIndex - w.startIndex
@ -734,7 +769,7 @@ func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, opts storag
// current state and only then start watching from that point.
//
// TODO: In v2 api, we should stop returning the current state - #13969.
return w.getIntervalFromStoreLocked()
return w.getIntervalFromStoreLocked(key, matchesSingle)
}
// SendInitialEvents = false and resourceVersion = 0
// means that the request would like to start watching
@ -753,15 +788,15 @@ func (w *watchCache) getAllEventsSinceLocked(resourceVersion uint64, opts storag
indexerFunc := func(i int) *watchCacheEvent {
return w.cache[i%w.capacity]
}
ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, &w.RWMutex)
ci := newCacheInterval(w.startIndex+first, w.endIndex, indexerFunc, w.indexValidator, w.RWMutex.RLocker())
return ci, nil
}
// getIntervalFromStoreLocked returns a watchCacheInterval
// that covers the entire storage state.
// This function assumes to be called under the watchCache lock.
func (w *watchCache) getIntervalFromStoreLocked() (*watchCacheInterval, error) {
ci, err := newCacheIntervalFromStore(w.resourceVersion, w.store, w.getAttrsFunc)
func (w *watchCache) getIntervalFromStoreLocked(key string, matchesSingle bool) (*watchCacheInterval, error) {
ci, err := newCacheIntervalFromStore(w.resourceVersion, w.store, w.getAttrsFunc, key, matchesSingle)
if err != nil {
return nil, err
}

View File

@ -133,9 +133,22 @@ func (s sortableWatchCacheEvents) Swap(i, j int) {
// returned by Next() need to be events from a List() done on the underlying store of
// the watch cache.
// The items returned in the interval will be sorted by Key.
func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc) (*watchCacheInterval, error) {
func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc, key string, matchesSingle bool) (*watchCacheInterval, error) {
buffer := &watchCacheIntervalBuffer{}
allItems := store.List()
var allItems []interface{}
if matchesSingle {
item, exists, err := store.GetByKey(key)
if err != nil {
return nil, err
}
if exists {
allItems = append(allItems, item)
}
} else {
allItems = store.List()
}
buffer.buffer = make([]*watchCacheEvent, len(allItems))
for i, item := range allItems {
elem, ok := item.(*storeElement)

View File

@ -42,14 +42,14 @@ func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester
requestWatchProgress: requestWatchProgress,
contextMetadata: contextMetadata,
}
pr.cond = sync.NewCond(pr.mux.RLocker())
pr.cond = sync.NewCond(&pr.mux)
return pr
}
type WatchProgressRequester func(ctx context.Context) error
type TickerFactory interface {
NewTicker(time.Duration) clock.Ticker
NewTimer(time.Duration) clock.Timer
}
// conditionalProgressRequester will request progress notification if there
@ -59,7 +59,7 @@ type conditionalProgressRequester struct {
requestWatchProgress WatchProgressRequester
contextMetadata metadata.MD
mux sync.RWMutex
mux sync.Mutex
cond *sync.Cond
waiting int
stopped bool
@ -78,12 +78,12 @@ func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) {
pr.stopped = true
pr.cond.Signal()
}()
ticker := pr.clock.NewTicker(progressRequestPeriod)
defer ticker.Stop()
timer := pr.clock.NewTimer(progressRequestPeriod)
defer timer.Stop()
for {
stopped := func() bool {
pr.mux.RLock()
defer pr.mux.RUnlock()
pr.mux.Lock()
defer pr.mux.Unlock()
for pr.waiting == 0 && !pr.stopped {
pr.cond.Wait()
}
@ -94,15 +94,17 @@ func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) {
}
select {
case <-ticker.C():
case <-timer.C():
shouldRequest := func() bool {
pr.mux.RLock()
defer pr.mux.RUnlock()
pr.mux.Lock()
defer pr.mux.Unlock()
return pr.waiting > 0 && !pr.stopped
}()
if !shouldRequest {
timer.Reset(0)
continue
}
timer.Reset(progressRequestPeriod)
err := pr.requestWatchProgress(ctx)
if err != nil {
klog.V(4).InfoS("Error requesting bookmark", "err", err)
@ -124,5 +126,4 @@ func (pr *conditionalProgressRequester) Remove() {
pr.mux.Lock()
defer pr.mux.Unlock()
pr.waiting -= 1
pr.cond.Signal()
}

View File

@ -91,3 +91,30 @@ func EncodeContinue(key, keyPrefix string, resourceVersion int64) (string, error
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
// PrepareContinueToken prepares optional
// parameters for retrieving additional results for a paginated request.
//
// This function sets up parameters that a client can use to fetch the remaining results
// from the server if they are available.
func PrepareContinueToken(keyLastItem, keyPrefix string, resourceVersion int64, itemsCount int64, hasMoreItems bool, opts ListOptions) (string, *int64, error) {
var remainingItemCount *int64
var continueValue string
var err error
if hasMoreItems {
// Instruct the client to begin querying from immediately after the last key.
continueValue, err = EncodeContinue(keyLastItem+"\x00", keyPrefix, resourceVersion)
if err != nil {
return "", remainingItemCount, err
}
// The etcd response count includes objects that do not match the predicate.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// We only set remainingItemCount if the predicate is empty.
if opts.Predicate.Empty() {
remainingItems := itemsCount - opts.Predicate.Limit
remainingItemCount = &remainingItems
}
}
return continueValue, remainingItemCount, err
}
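An illustrative call-site sketch (in-package style; lastReturnedKey, prefix, rev, total, hasMore, and versioner are hypothetical locals, not names from this patch) of how a store surfaces pagination metadata after a partial LIST.

func updateListWithContinuation(versioner Versioner, listObj runtime.Object, lastReturnedKey, prefix string, rev, total int64, hasMore bool, opts ListOptions) error {
	continueValue, remainingItemCount, err := PrepareContinueToken(lastReturnedKey, prefix, rev, total, hasMore, opts)
	if err != nil {
		return err
	}
	// remainingItemCount stays nil for non-empty selectors, since the raw item
	// count would overstate how many matching objects are actually left.
	return versioner.UpdateList(listObj, uint64(rev), continueValue, remainingItemCount)
}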

View File

@ -25,7 +25,10 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
var ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created")
var (
ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created")
ErrStorageNotReady = errors.New("storage not ready")
)
const (
ErrCodeKeyNotFound int = iota + 1

View File

@ -2,3 +2,6 @@
reviewers:
- wojtek-t
- serathius
labels:
- sig/etcd

View File

@ -39,9 +39,12 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/audit"
endpointsrequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
etcdfeature "k8s.io/apiserver/pkg/storage/feature"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-base/tracing"
"k8s.io/klog/v2"
)
@ -139,6 +142,9 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func
w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) {
return storage.GetCurrentResourceVersionFromStorage(ctx, s, newListFunc, resourcePrefix, w.objectType)
}
if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) || utilfeature.DefaultFeatureGate.Enabled(features.WatchList) {
etcdfeature.DefaultFeatureSupportChecker.CheckClient(c.Ctx(), c, storage.RequestWatchProgress)
}
return s
}
@ -585,6 +591,11 @@ func (s *store) Count(key string) (int64, error) {
return getResp.Count, nil
}
// ReadinessCheck implements storage.Interface.
func (s *store) ReadinessCheck() error {
return nil
}
// resolveGetListRev is used by GetList to resolve the rev to use in the client.KV.Get request.
func (s *store) resolveGetListRev(continueKey string, continueRV int64, opts storage.ListOptions) (int64, error) {
var withRev int64
@ -805,27 +816,11 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption
v.Set(reflect.MakeSlice(v.Type(), 0, 0))
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, withRev)
if err != nil {
return err
}
var remainingItemCount *int64
// getResp.Count counts in objects that do not match the pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if opts.Predicate.Empty() {
c := int64(getResp.Count - opts.Predicate.Limit)
remainingItemCount = &c
}
return s.versioner.UpdateList(listObj, uint64(withRev), next, remainingItemCount)
continueValue, remainingItemCount, err := storage.PrepareContinueToken(string(lastKey), keyPrefix, withRev, getResp.Count, hasMore, opts)
if err != nil {
return err
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(withRev), "", nil)
return s.versioner.UpdateList(listObj, uint64(withRev), continueValue, remainingItemCount)
}
// growSlice takes a slice value and grows its capacity up

View File

@ -46,8 +46,9 @@ import (
const (
// We have set a buffer in order to reduce the number of context switches.
incomingBufSize = 100
outgoingBufSize = 100
incomingBufSize = 100
outgoingBufSize = 100
processEventConcurrency = 10
)
// defaultWatcherMaxLimit is used to facilitate construction tests
@ -230,8 +231,7 @@ func (wc *watchChan) run(initialEventsEndBookmarkRequired, forceInitialEvents bo
go wc.startWatching(watchClosedCh, initialEventsEndBookmarkRequired, forceInitialEvents)
var resultChanWG sync.WaitGroup
resultChanWG.Add(1)
go wc.processEvent(&resultChanWG)
wc.processEvents(&resultChanWG)
select {
case err := <-wc.errChan:
@ -424,10 +424,17 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}, initialEventsEnd
close(watchClosedCh)
}
// processEvent processes events from etcd watcher and sends results to resultChan.
func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
// processEvents processes events from etcd watcher and sends results to resultChan.
func (wc *watchChan) processEvents(wg *sync.WaitGroup) {
if utilfeature.DefaultFeatureGate.Enabled(features.ConcurrentWatchObjectDecode) {
wc.concurrentProcessEvents(wg)
} else {
wg.Add(1)
go wc.serialProcessEvents(wg)
}
}
func (wc *watchChan) serialProcessEvents(wg *sync.WaitGroup) {
defer wg.Done()
for {
select {
case e := <-wc.incomingEventChan:
@ -435,7 +442,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
if res == nil {
continue
}
if len(wc.resultChan) == outgoingBufSize {
if len(wc.resultChan) == cap(wc.resultChan) {
klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", wc.watcher.objectType, "groupResource", wc.watcher.groupResource)
}
// If user couldn't receive results fast enough, we also block incoming events from watcher.
@ -452,6 +459,95 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
}
}
func (wc *watchChan) concurrentProcessEvents(wg *sync.WaitGroup) {
p := concurrentOrderedEventProcessing{
input: wc.incomingEventChan,
processFunc: wc.transform,
output: wc.resultChan,
processingQueue: make(chan chan *watch.Event, processEventConcurrency-1),
objectType: wc.watcher.objectType,
groupResource: wc.watcher.groupResource,
}
wg.Add(1)
go func() {
defer wg.Done()
p.scheduleEventProcessing(wc.ctx, wg)
}()
wg.Add(1)
go func() {
defer wg.Done()
p.collectEventProcessing(wc.ctx)
}()
}
type concurrentOrderedEventProcessing struct {
input chan *event
processFunc func(*event) *watch.Event
output chan watch.Event
processingQueue chan chan *watch.Event
// Metadata for logging
objectType string
groupResource schema.GroupResource
}
func (p *concurrentOrderedEventProcessing) scheduleEventProcessing(ctx context.Context, wg *sync.WaitGroup) {
var e *event
for {
select {
case <-ctx.Done():
return
case e = <-p.input:
}
processingResponse := make(chan *watch.Event, 1)
select {
case <-ctx.Done():
return
case p.processingQueue <- processingResponse:
}
wg.Add(1)
go func(e *event, response chan<- *watch.Event) {
defer wg.Done()
select {
case <-ctx.Done():
case response <- p.processFunc(e):
}
}(e, processingResponse)
}
}
func (p *concurrentOrderedEventProcessing) collectEventProcessing(ctx context.Context) {
var processingResponse chan *watch.Event
var e *watch.Event
for {
select {
case <-ctx.Done():
return
case processingResponse = <-p.processingQueue:
}
select {
case <-ctx.Done():
return
case e = <-processingResponse:
}
if e == nil {
continue
}
if len(p.output) == cap(p.output) {
klog.V(3).InfoS("Fast watcher, slow processing. Probably caused by slow dispatching events to watchers", "outgoingEvents", outgoingBufSize, "objectType", p.objectType, "groupResource", p.groupResource)
}
// If user couldn't receive results fast enough, we also block incoming events from watcher.
// Because storing events in local will cause more memory usage.
// The worst case would be closing the fast watcher.
select {
case <-ctx.Done():
return
case p.output <- *e:
}
}
}
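// orderedConcurrentSketch is a standalone distillation of the channel-of-channels
// pattern used above: workers transform items concurrently while the collector drains
// per-item response channels in submission order, so output order matches input order.
// Illustrative sketch only; the names below are not part of the vendored code.
func orderedConcurrentSketch(inputs []int) []string {
	queue := make(chan chan string, 2) // bounds how far transforms may run ahead of the collector
	go func() {
		for _, in := range inputs {
			resp := make(chan string, 1)
			queue <- resp // blocks once the collector falls behind
			go func(v int, out chan<- string) {
				out <- fmt.Sprintf("event-%d", v) // the concurrent "transform" step
			}(in, resp)
		}
		close(queue)
	}()
	var out []string
	for resp := range queue {
		out = append(out, <-resp) // results arrive in submission order
	}
	return out
}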
func (wc *watchChan) filter(obj runtime.Object) bool {
if wc.internalPred.Empty() {
return true

View File

@ -0,0 +1,172 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package feature
import (
"context"
"fmt"
"sync"
"time"
clientv3 "go.etcd.io/etcd/client/v3"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
)
var (
// Define these static versions for checking the etcd version; see kubernetes issue #123192.
v3_4_31 = version.MustParseSemantic("3.4.31")
v3_5_0 = version.MustParseSemantic("3.5.0")
v3_5_13 = version.MustParseSemantic("3.5.13")
// DefaultFeatureSupportChecker is a shared global etcd FeatureSupportChecker.
DefaultFeatureSupportChecker FeatureSupportChecker = newDefaultFeatureSupportChecker()
)
// FeatureSupportChecker to define Supports functions.
type FeatureSupportChecker interface {
// Supports checks if the feature is supported or not by checking the internal cache.
// By default, all calls to this function before calling CheckClient return false.
// Returns true if all endpoints in the etcd clients support the feature.
// If client A supports the feature and client B does not, `Supports` will
// first return true after client A initialization and then return false after client B
// initialization; the reported support can flip at runtime.
Supports(feature storage.Feature) bool
// CheckClient works with an etcd client to recalculate feature support and cache it internally.
// All etcd clients must support the feature for `Supports` to return true.
// If client A supports the feature and client B does not, `Supports` will
// first return true after client A initialization and then return false after client B
// initialization; the reported support can flip at runtime.
CheckClient(ctx context.Context, c client, feature storage.Feature)
}
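// FeatureSupportChecker usage sketch, mirroring the call sites elsewhere in this diff:
// register each etcd client once via CheckClient, then consult Supports on the request path.
//
//	etcdfeature.DefaultFeatureSupportChecker.CheckClient(c.Ctx(), c, storage.RequestWatchProgress)
//	if etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress) {
//		// it is safe to rely on watch progress notifications
//	}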
type defaultFeatureSupportChecker struct {
lock sync.Mutex
progressNotifySupported *bool
checkingEndpoint map[string]struct{}
}
func newDefaultFeatureSupportChecker() *defaultFeatureSupportChecker {
return &defaultFeatureSupportChecker{
checkingEndpoint: make(map[string]struct{}),
}
}
// Supports can check the feature from anywhere without storage access if it was cached before.
func (f *defaultFeatureSupportChecker) Supports(feature storage.Feature) bool {
switch feature {
case storage.RequestWatchProgress:
f.lock.Lock()
defer f.lock.Unlock()
return ptr.Deref(f.progressNotifySupported, false)
default:
runtime.HandleError(fmt.Errorf("feature %q is not implemented in DefaultFeatureSupportChecker", feature))
return false
}
}
// CheckClient accepts a client, calculates the support per endpoint, and caches it.
func (f *defaultFeatureSupportChecker) CheckClient(ctx context.Context, c client, feature storage.Feature) {
switch feature {
case storage.RequestWatchProgress:
f.checkClient(ctx, c)
default:
runtime.HandleError(fmt.Errorf("feature %q is not implemented in DefaultFeatureSupportChecker", feature))
}
}
func (f *defaultFeatureSupportChecker) checkClient(ctx context.Context, c client) {
// Start with 10 ms, multiply by 2 each step, and cap at 15 seconds.
delayFunc := wait.Backoff{
Duration: 10 * time.Millisecond,
Cap: 15 * time.Second,
Factor: 2.0,
Steps: 11}.DelayFunc()
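// With Duration 10ms, Factor 2.0, Steps 11 and Cap 15s, the retry delays work out to roughly
// 10ms, 20ms, 40ms, ..., 5.12s, 10.24s, and then stay capped at 15s for later attempts.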
f.lock.Lock()
defer f.lock.Unlock()
for _, ep := range c.Endpoints() {
if _, found := f.checkingEndpoint[ep]; found {
continue
}
f.checkingEndpoint[ep] = struct{}{}
go func(ep string) {
defer runtime.HandleCrash()
err := delayFunc.Until(ctx, true, true, func(ctx context.Context) (done bool, err error) {
internalErr := f.clientSupportsRequestWatchProgress(ctx, c, ep)
return internalErr == nil, nil
})
if err != nil {
klog.ErrorS(err, "Failed to check if RequestWatchProgress is supported by etcd after retrying")
}
}(ep)
}
}
func (f *defaultFeatureSupportChecker) clientSupportsRequestWatchProgress(ctx context.Context, c client, ep string) error {
supported, err := endpointSupportsRequestWatchProgress(ctx, c, ep)
if err != nil {
return err
}
f.lock.Lock()
defer f.lock.Unlock()
if !supported {
klog.Infof("RequestWatchProgress feature is not supported by %q endpoint", ep)
f.progressNotifySupported = ptr.To(false)
return nil
}
if f.progressNotifySupported == nil {
f.progressNotifySupported = ptr.To(true)
}
return nil
}
// Sub interface of etcd client.
type client interface {
// Endpoints returns list of endpoints in etcd client.
Endpoints() []string
// Status retrieves the status information from the etcd client connected to the specified endpoint.
// It takes a context.Context parameter for cancellation or timeout control.
// It returns a clientv3.StatusResponse containing the status information or an error if the operation fails.
Status(ctx context.Context, endpoint string) (*clientv3.StatusResponse, error)
}
// endpointSupportsRequestWatchProgress evaluates whether RequestWatchProgress is supported by the current version of the etcd endpoint.
// Based on this issues:
// - https://github.com/etcd-io/etcd/issues/15220 - Fixed in etcd v3.4.25+ and v3.5.8+
// - https://github.com/etcd-io/etcd/issues/17507 - Fixed in etcd v3.4.31+ and v3.5.13+
func endpointSupportsRequestWatchProgress(ctx context.Context, c client, endpoint string) (bool, error) {
resp, err := c.Status(ctx, endpoint)
if err != nil {
return false, fmt.Errorf("failed checking etcd version, endpoint: %q: %w", endpoint, err)
}
ver, err := version.ParseSemantic(resp.Version)
if err != nil {
// Assume feature is not supported if etcd version cannot be parsed.
klog.ErrorS(err, "Failed to parse etcd version", "version", resp.Version)
return false, nil
}
if ver.LessThan(v3_4_31) || ver.AtLeast(v3_5_0) && ver.LessThan(v3_5_13) {
return false, nil
}
return true, nil
}
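// For example, versions below 3.4.31 and the 3.5.0-3.5.12 range report unsupported,
// while 3.4.31+ (within the 3.4 line), 3.5.13+ and 3.6+ report supported.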

View File

@ -29,6 +29,12 @@ import (
"k8s.io/apimachinery/pkg/watch"
)
// Feature is the name of each feature in storage that we check in feature_support_checker.
type Feature = string
// RequestWatchProgress is an etcd feature whose support can be checked via the feature support checker.
var RequestWatchProgress Feature = "RequestWatchProgress"
// Versioner abstracts setting and retrieving metadata fields from database response
// onto the object ot list. It is required to maintain storage invariants - updating an
// object twice with the same data except for the ResourceVersion and SelfLink must be
@ -237,6 +243,9 @@ type Interface interface {
// Count returns number of different entries under the key (generally being path prefix).
Count(key string) (int64, error)
// ReadinessCheck checks if the storage is ready for accepting requests.
ReadinessCheck() error
// RequestWatchProgress requests that a watch stream progress status be sent in the
// watch response stream as soon as possible.
// Used to monitor watch progress even when watching resources with no changes.

View File

@ -118,7 +118,7 @@ func (s *SelectionPredicate) MatchesObjectAttributes(l labels.Set, f fields.Set)
// MatchesSingleNamespace will return (namespace, true) if and only if s.Field matches on the object's
// namespace.
func (s *SelectionPredicate) MatchesSingleNamespace() (string, bool) {
if len(s.Continue) > 0 {
if len(s.Continue) > 0 || s.Field == nil {
return "", false
}
if namespace, ok := s.Field.RequiresExactMatch("metadata.namespace"); ok {
@ -130,7 +130,7 @@ func (s *SelectionPredicate) MatchesSingleNamespace() (string, bool) {
// MatchesSingle will return (name, true) if and only if s.Field matches on the object's
// name.
func (s *SelectionPredicate) MatchesSingle() (string, bool) {
if len(s.Continue) > 0 {
if len(s.Continue) > 0 || s.Field == nil {
return "", false
}
// TODO: should be namespace.name

View File

@ -20,6 +20,7 @@ import (
"time"
oteltrace "go.opentelemetry.io/otel/trace"
noopoteltrace "go.opentelemetry.io/otel/trace/noop"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -117,6 +118,6 @@ func NewDefaultConfig(prefix string, codec runtime.Codec) *Config {
HealthcheckTimeout: DefaultHealthcheckTimeout,
ReadycheckTimeout: DefaultReadinessTimeout,
LeaseManagerConfig: etcd3.NewDefaultLeaseManagerConfig(),
Transport: TransportConfig{TracerProvider: oteltrace.NewNoopTracerProvider()},
Transport: TransportConfig{TracerProvider: noopoteltrace.NewTracerProvider()},
}
}

View File

@ -317,6 +317,7 @@ var newETCD3Client = func(c storagebackend.TransportConfig) (*clientv3.Client, e
}
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerTracing) {
tracingOpts := []otelgrpc.Option{
otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents),
otelgrpc.WithPropagators(tracing.Propagators()),
otelgrpc.WithTracerProvider(c.TracerProvider),
}

View File

@ -24,18 +24,12 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation/path"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// initialEventsAnnotationKey the name of the key
// under which an annotation marking the end of list stream
// is kept.
initialEventsAnnotationKey = "k8s.io/initial-events-end"
)
type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error)
// SimpleUpdate converts SimpleUpdateFunc into UpdateFunc
@ -46,10 +40,6 @@ func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc {
}
}
func EverythingFunc(runtime.Object) bool {
return true
}
func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) {
meta, err := meta.Accessor(obj)
if err != nil {
@ -144,7 +134,7 @@ func AnnotateInitialEventsEndBookmark(obj runtime.Object) error {
if objAnnotations == nil {
objAnnotations = map[string]string{}
}
objAnnotations[initialEventsAnnotationKey] = "true"
objAnnotations[metav1.InitialEventsAnnotationKey] = "true"
objMeta.SetAnnotations(objAnnotations)
return nil
}
@ -157,5 +147,5 @@ func HasInitialEventsEndBookmarkAnnotation(obj runtime.Object) (bool, error) {
return false, err
}
objAnnotations := objMeta.GetAnnotations()
return objAnnotations[initialEventsAnnotationKey] == "true", nil
return objAnnotations[metav1.InitialEventsAnnotationKey] == "true", nil
}

View File

@ -24,8 +24,8 @@ var (
// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
// Tests that need to modify feature gates for the duration of their test should use:
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
// featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)
DefaultMutableFeatureGate featuregate.MutableVersionedFeatureGate = featuregate.NewFeatureGate()
// DefaultFeatureGate is a shared global FeatureGate.
// Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate.

View File

@ -135,7 +135,7 @@ type configController struct {
// configQueue holds `(interface{})(0)` when the configuration
// objects need to be reprocessed.
configQueue workqueue.RateLimitingInterface
configQueue workqueue.TypedRateLimitingInterface[int]
plLister flowcontrollister.PriorityLevelConfigurationLister
plInformerSynced cache.InformerSynced
@ -204,7 +204,7 @@ type priorityLevelState struct {
// reached through this pointer is mutable.
pl *flowcontrol.PriorityLevelConfiguration
// qsCompleter holds the QueueSetCompleter derived from `config`
// qsCompleter holds the QueueSetCompleter derived from `pl`
// and `queues`.
qsCompleter fq.QueueSetCompleter
@ -255,12 +255,12 @@ type priorityLevelState struct {
type seatDemandStats struct {
avg float64
stdDev float64
highWatermark float64
highWatermark int
smoothed float64
}
func (stats *seatDemandStats) update(obs fq.IntegratorResults) {
stats.highWatermark = obs.Max
stats.highWatermark = int(math.Round(obs.Max))
if obs.Duration <= 0 {
return
}
@ -292,7 +292,10 @@ func newTestableController(config TestableConfig) *configController {
klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
// Start with longish delay because conflicts will be between
// different processes, so take some time to go away.
cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
cfgCtlr.configQueue = workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.NewTypedItemExponentialFailureRateLimiter[int](200*time.Millisecond, 8*time.Hour),
workqueue.TypedRateLimitingQueueConfig[int]{Name: "priority_and_fairness_config_queue"},
)
// ensure the data structure reflects the mandatory config
cfgCtlr.lockAndDigestConfigObjects(nil, nil)
fci := config.InformerFactory.Flowcontrol().V1()
@ -395,38 +398,63 @@ func (cfgCtlr *configController) updateBorrowing() {
func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plStates map[string]*priorityLevelState) {
items := make([]allocProblemItem, 0, len(plStates))
plNames := make([]string, 0, len(plStates))
nonExemptPLNames := make([]string, 0, len(plStates))
idxOfNonExempt := map[string]int{} // items index of non-exempt classes
cclOfExempt := map[string]int{} // minCurrentCL of exempt classes
var minCLSum, minCurrentCLSum int // sums over non-exempt classes
remainingServerCL := cfgCtlr.nominalCLSum
for plName, plState := range plStates {
obs := plState.seatDemandIntegrator.Reset()
plState.seatDemandStats.update(obs)
// Lower bound on this priority level's adjusted concurreny limit is the lesser of:
// - its seat demamd high watermark over the last adjustment period, and
// - its configured concurrency limit.
// BUT: we do not want this to be lower than the lower bound from configuration.
// See KEP-1040 for a more detailed explanation.
minCurrentCL := math.Max(float64(plState.minCL), math.Min(float64(plState.nominalCL), plState.seatDemandStats.highWatermark))
plNames = append(plNames, plName)
items = append(items, allocProblemItem{
lowerBound: minCurrentCL,
upperBound: float64(plState.maxCL),
target: math.Max(minCurrentCL, plState.seatDemandStats.smoothed),
})
var minCurrentCL int
if plState.pl.Spec.Type == flowcontrol.PriorityLevelEnablementExempt {
minCurrentCL = max(plState.minCL, plState.seatDemandStats.highWatermark)
cclOfExempt[plName] = minCurrentCL
remainingServerCL -= minCurrentCL
} else {
// Lower bound on this priority level's adjusted concurrency limit is the lesser of:
// - its seat demand high watermark over the last adjustment period, and
// - its configured concurrency limit.
// BUT: we do not want this to be lower than the lower bound from configuration.
// See KEP-1040 for a more detailed explanation.
minCurrentCL = max(plState.minCL, min(plState.nominalCL, plState.seatDemandStats.highWatermark))
idxOfNonExempt[plName] = len(items)
nonExemptPLNames = append(nonExemptPLNames, plName)
items = append(items, allocProblemItem{
lowerBound: float64(minCurrentCL),
upperBound: float64(plState.maxCL),
target: math.Max(float64(minCurrentCL), plState.seatDemandStats.smoothed),
})
minCLSum += plState.minCL
minCurrentCLSum += minCurrentCL
}
}
if len(items) == 0 && cfgCtlr.nominalCLSum > 0 {
klog.ErrorS(nil, "Impossible: no priority levels", "plStates", cfgCtlr.priorityLevelStates)
return
}
allocs, fairFrac, err := computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items)
if err != nil {
klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", plNames, "items", items)
allocs = make([]float64, len(items))
for idx, plName := range plNames {
plState := plStates[plName]
allocs[idx] = float64(plState.currentCL)
var allocs []float64
var shareFrac, fairFrac float64
var err error
if remainingServerCL <= minCLSum {
metrics.SetFairFrac(0)
} else if remainingServerCL <= minCurrentCLSum {
shareFrac = float64(remainingServerCL-minCLSum) / float64(minCurrentCLSum-minCLSum)
metrics.SetFairFrac(0)
} else {
allocs, fairFrac, err = computeConcurrencyAllocation(cfgCtlr.nominalCLSum, items)
if err != nil {
klog.ErrorS(err, "Unable to derive new concurrency limits", "plNames", nonExemptPLNames, "items", items)
allocs = make([]float64, len(items))
for idx, plName := range nonExemptPLNames {
plState := plStates[plName]
allocs[idx] = float64(plState.currentCL)
}
}
metrics.SetFairFrac(float64(fairFrac))
}
for idx, plName := range plNames {
plState := plStates[plName]
for plName, plState := range plStates {
idx, isNonExempt := idxOfNonExempt[plName]
if setCompleters {
qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues,
plState.pl, plState.reqsGaugePair, plState.execSeatsObs,
@ -437,10 +465,20 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta
}
plState.qsCompleter = qsCompleter
}
currentCL := int(math.Round(float64(allocs[idx])))
var currentCL int
if !isNonExempt {
currentCL = cclOfExempt[plName]
} else if remainingServerCL <= minCLSum {
currentCL = plState.minCL
} else if remainingServerCL <= minCurrentCLSum {
minCurrentCL := max(plState.minCL, min(plState.nominalCL, plState.seatDemandStats.highWatermark))
currentCL = plState.minCL + int(math.Round(float64(minCurrentCL-plState.minCL)*shareFrac))
} else {
currentCL = int(math.Round(float64(allocs[idx])))
}
relChange := relDiff(float64(currentCL), float64(plState.currentCL))
plState.currentCL = currentCL
metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, plState.seatDemandStats.highWatermark, plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL)
metrics.NotePriorityLevelConcurrencyAdjustment(plState.pl.Name, float64(plState.seatDemandStats.highWatermark), plState.seatDemandStats.avg, plState.seatDemandStats.stdDev, plState.seatDemandStats.smoothed, float64(items[idx].target), currentCL)
logLevel := klog.Level(4)
if relChange >= 0.05 {
logLevel = 2
@ -455,7 +493,6 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta
klog.V(logLevel).InfoS("Update CurrentCL", "plName", plName, "seatDemandHighWatermark", plState.seatDemandStats.highWatermark, "seatDemandAvg", plState.seatDemandStats.avg, "seatDemandStdev", plState.seatDemandStats.stdDev, "seatDemandSmoothed", plState.seatDemandStats.smoothed, "fairFrac", fairFrac, "currentCL", currentCL, "concurrencyDenominator", concurrencyDenominator, "backstop", err != nil)
plState.queues = plState.qsCompleter.Complete(fq.DispatchingConfig{ConcurrencyLimit: currentCL, ConcurrencyDenominator: concurrencyDenominator})
}
metrics.SetFairFrac(float64(fairFrac))
}
// runWorker is the logic of the one and only worker goroutine. We
@ -474,7 +511,7 @@ func (cfgCtlr *configController) processNextWorkItem() bool {
return false
}
func(obj interface{}) {
func(obj int) {
defer cfgCtlr.configQueue.Done(obj)
specificDelay, err := cfgCtlr.syncOne()
switch {

View File

@ -25,6 +25,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
etcdfeature "k8s.io/apiserver/pkg/storage/feature"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
)
@ -165,15 +167,16 @@ func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache)
requestWatchProgressSupported := etcdfeature.DefaultFeatureSupportChecker.Supports(storage.RequestWatchProgress)
// Serve consistent reads from storage when ConsistentListFromCache is disabled or etcd does not support RequestWatchProgress
consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled
consistentReadFromStorage := resourceVersion == "" && !(consistentListFromCacheEnabled && requestWatchProgressSupported)
// Watch cache doesn't support continuations, so serve them from etcd.
hasContinuation := len(opts.Continue) > 0
// Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd.
hasLimit := opts.Limit > 0 && resourceVersion != "0"
// Watch cache only supports ResourceVersionMatchNotOlderThan (default).
unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan
// see https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list
isLegacyExactMatch := opts.Limit > 0 && match == "" && len(resourceVersion) > 0 && resourceVersion != "0"
unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan || isLegacyExactMatch
return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
return consistentReadFromStorage || hasContinuation || unsupportedMatch
}
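// For example: with ConsistentListFromCache enabled and RequestWatchProgress supported by etcd,
// an unversioned list (resourceVersion == "") is no longer forced to storage; requests still go
// to etcd when a continue token is set, when resourceVersionMatch is anything other than
// NotOlderThan, or for the legacy exact-match case (limit > 0 with a specific, non-zero resourceVersion).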

454
vendor/k8s.io/apiserver/pkg/util/version/registry.go generated vendored Normal file
View File

@ -0,0 +1,454 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"fmt"
"sort"
"strings"
"sync"
"github.com/spf13/pflag"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/version"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
)
// DefaultComponentGlobalsRegistry is the global var to store the effective versions and feature gates for all components for easy access.
// Example usage:
// // register the component effective version and feature gate first
// _, _ = utilversion.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(utilversion.DefaultKubeComponent, utilversion.DefaultKubeEffectiveVersion(), utilfeature.DefaultMutableFeatureGate)
// wardleEffectiveVersion := utilversion.NewEffectiveVersion("1.2")
// wardleFeatureGate := featuregate.NewFeatureGate()
// utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(apiserver.WardleComponentName, wardleEffectiveVersion, wardleFeatureGate))
//
// cmd := &cobra.Command{
// ...
// // call DefaultComponentGlobalsRegistry.Set() in PersistentPreRunE
// PersistentPreRunE: func(*cobra.Command, []string) error {
// if err := utilversion.DefaultComponentGlobalsRegistry.Set(); err != nil {
// return err
// }
// ...
// },
// RunE: func(c *cobra.Command, args []string) error {
// // call utilversion.DefaultComponentGlobalsRegistry.Validate() somewhere
// },
// }
//
// flags := cmd.Flags()
// // add flags
// utilversion.DefaultComponentGlobalsRegistry.AddFlags(flags)
var DefaultComponentGlobalsRegistry ComponentGlobalsRegistry = NewComponentGlobalsRegistry()
const (
DefaultKubeComponent = "kube"
klogLevel = 2
)
type VersionMapping func(from *version.Version) *version.Version
// ComponentGlobals stores the global variables for a component for easy access.
type ComponentGlobals struct {
effectiveVersion MutableEffectiveVersion
featureGate featuregate.MutableVersionedFeatureGate
// emulationVersionMapping contains the mapping from the emulation version of this component
// to the emulation version of another component.
emulationVersionMapping map[string]VersionMapping
// dependentEmulationVersion stores whether or not this component's EmulationVersion is dependent through mapping on another component.
// If true, the emulation version cannot be set from the flag, or version mapping from another component.
dependentEmulationVersion bool
// minCompatibilityVersionMapping contains the mapping from the min compatibility version of this component
// to the min compatibility version of another component.
minCompatibilityVersionMapping map[string]VersionMapping
// dependentMinCompatibilityVersion stores whether or not this component's MinCompatibilityVersion is dependent through mapping on another component
// If true, the min compatibility version cannot be set from the flag, or version mapping from another component.
dependentMinCompatibilityVersion bool
}
type ComponentGlobalsRegistry interface {
// EffectiveVersionFor returns the EffectiveVersion registered under the component.
// Returns nil if the component is not registered.
EffectiveVersionFor(component string) EffectiveVersion
// FeatureGateFor returns the FeatureGate registered under the component.
// Returns nil if the component is not registered.
FeatureGateFor(component string) featuregate.FeatureGate
// Register registers the EffectiveVersion and FeatureGate for a component.
// returns error if the component is already registered.
Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error
// ComponentGlobalsOrRegister would return the registered global variables for the component if it already exists in the registry.
// Otherwise, the provided variables would be registered under the component, and the same variables would be returned.
ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate)
// AddFlags adds the "--emulated-version" and "--feature-gates" flags.
AddFlags(fs *pflag.FlagSet)
// Set sets the flags for all global variables for all components registered.
Set() error
// SetFallback calls Set() if it has never been called.
SetFallback() error
// Validate calls the Validate() function for all the global variables for all components registered.
Validate() []error
// Reset removes all stored ComponentGlobals, configurations, and version mappings.
Reset()
// SetEmulationVersionMapping sets the mapping from the emulation version of one component
// to the emulation version of another component.
// Once set, the emulation version of the toComponent will be determined by the emulation version of the fromComponent,
// and cannot be set from cmd flags anymore.
// For a given component, its emulation version can only depend on one other component; multiple dependencies are not allowed.
SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error
}
type componentGlobalsRegistry struct {
componentGlobals map[string]*ComponentGlobals
mutex sync.RWMutex
// list of component name to emulation version set from the flag.
emulationVersionConfig []string
// map of component name to the list of feature gates set from the flag.
featureGatesConfig map[string][]string
// set records whether the Set() function for the registry has already been called.
set bool
}
func NewComponentGlobalsRegistry() *componentGlobalsRegistry {
return &componentGlobalsRegistry{
componentGlobals: make(map[string]*ComponentGlobals),
emulationVersionConfig: nil,
featureGatesConfig: nil,
}
}
func (r *componentGlobalsRegistry) Reset() {
r.mutex.Lock()
defer r.mutex.Unlock()
r.componentGlobals = make(map[string]*ComponentGlobals)
r.emulationVersionConfig = nil
r.featureGatesConfig = nil
r.set = false
}
func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) EffectiveVersion {
r.mutex.RLock()
defer r.mutex.RUnlock()
globals, ok := r.componentGlobals[component]
if !ok {
return nil
}
return globals.effectiveVersion
}
func (r *componentGlobalsRegistry) FeatureGateFor(component string) featuregate.FeatureGate {
r.mutex.RLock()
defer r.mutex.RUnlock()
globals, ok := r.componentGlobals[component]
if !ok {
return nil
}
return globals.featureGate
}
func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error {
if _, ok := r.componentGlobals[component]; ok {
return fmt.Errorf("component globals of %s already registered", component)
}
if featureGate != nil {
if err := featureGate.SetEmulationVersion(effectiveVersion.EmulationVersion()); err != nil {
return err
}
}
c := ComponentGlobals{
effectiveVersion: effectiveVersion,
featureGate: featureGate,
emulationVersionMapping: make(map[string]VersionMapping),
minCompatibilityVersionMapping: make(map[string]VersionMapping),
}
r.componentGlobals[component] = &c
return nil
}
func (r *componentGlobalsRegistry) Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error {
if effectiveVersion == nil {
return fmt.Errorf("cannot register nil effectiveVersion")
}
r.mutex.Lock()
defer r.mutex.Unlock()
return r.unsafeRegister(component, effectiveVersion, featureGate)
}
func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate) {
r.mutex.Lock()
defer r.mutex.Unlock()
globals, ok := r.componentGlobals[component]
if ok {
return globals.effectiveVersion, globals.featureGate
}
utilruntime.Must(r.unsafeRegister(component, effectiveVersion, featureGate))
return effectiveVersion, featureGate
}
func (r *componentGlobalsRegistry) unsafeKnownFeatures() []string {
var known []string
for component, globals := range r.componentGlobals {
if globals.featureGate == nil {
continue
}
for _, f := range globals.featureGate.KnownFeatures() {
known = append(known, component+":"+f)
}
}
sort.Strings(known)
return known
}
func (r *componentGlobalsRegistry) unsafeVersionFlagOptions(isEmulation bool) []string {
var vs []string
for component, globals := range r.componentGlobals {
binaryVer := globals.effectiveVersion.BinaryVersion()
if isEmulation {
if globals.dependentEmulationVersion {
continue
}
// emulated version could be between binaryMajor.{binaryMinor} and binaryMajor.{binaryMinor}
// TODO: change to binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} in 1.32
vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component,
binaryVer.SubtractMinor(0).String(), binaryVer.String(), globals.effectiveVersion.EmulationVersion().String()))
} else {
if globals.dependentMinCompatibilityVersion {
continue
}
// min compatibility version could be between binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor}
vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component,
binaryVer.SubtractMinor(1).String(), binaryVer.String(), globals.effectiveVersion.MinCompatibilityVersion().String()))
}
}
sort.Strings(vs)
return vs
}
func (r *componentGlobalsRegistry) AddFlags(fs *pflag.FlagSet) {
if r == nil {
return
}
r.mutex.Lock()
defer r.mutex.Unlock()
for _, globals := range r.componentGlobals {
if globals.featureGate != nil {
globals.featureGate.Close()
}
}
if r.emulationVersionConfig != nil || r.featureGatesConfig != nil {
klog.Warning("calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags")
}
r.emulationVersionConfig = []string{}
r.featureGatesConfig = make(map[string][]string)
fs.StringSliceVar(&r.emulationVersionConfig, "emulated-version", r.emulationVersionConfig, ""+
"The versions different components emulate their capabilities (APIs, features, ...) of.\n"+
"If set, the component will emulate the behavior of this version instead of the underlying binary version.\n"+
"Version format could only be major.minor, for example: '--emulated-version=wardle=1.2,kube=1.31'. Options are:\n"+strings.Join(r.unsafeVersionFlagOptions(true), "\n")+
"If the component is not specified, defaults to \"kube\"")
fs.Var(cliflag.NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(&r.featureGatesConfig), "feature-gates", "Comma-separated list of component:key=value pairs that describe feature gates for alpha/experimental features of different components.\n"+
"If the component is not specified, defaults to \"kube\". This flag can be repeatedly invoked. For example: --feature-gates 'wardle:featureA=true,wardle:featureB=false' --feature-gates 'kube:featureC=true'"+
"Options are:\n"+strings.Join(r.unsafeKnownFeatures(), "\n"))
}
type componentVersion struct {
component string
ver *version.Version
}
// getFullEmulationVersionConfig expands the given version config using the registered version mappings,
// and returns the map of component to Version.
func (r *componentGlobalsRegistry) getFullEmulationVersionConfig(
versionConfigMap map[string]*version.Version) (map[string]*version.Version, error) {
result := map[string]*version.Version{}
setQueue := []componentVersion{}
for comp, ver := range versionConfigMap {
if _, ok := r.componentGlobals[comp]; !ok {
return result, fmt.Errorf("component not registered: %s", comp)
}
klog.V(klogLevel).Infof("setting version %s=%s", comp, ver.String())
setQueue = append(setQueue, componentVersion{comp, ver})
}
for len(setQueue) > 0 {
cv := setQueue[0]
if _, visited := result[cv.component]; visited {
return result, fmt.Errorf("setting version of %s more than once, probably version mapping loop", cv.component)
}
setQueue = setQueue[1:]
result[cv.component] = cv.ver
for toComp, f := range r.componentGlobals[cv.component].emulationVersionMapping {
toVer := f(cv.ver)
if toVer == nil {
return result, fmt.Errorf("got nil version from mapping of %s=%s to component:%s", cv.component, cv.ver.String(), toComp)
}
klog.V(klogLevel).Infof("setting version %s=%s from version mapping of %s=%s", toComp, toVer.String(), cv.component, cv.ver.String())
setQueue = append(setQueue, componentVersion{toComp, toVer})
}
}
return result, nil
}
func toVersionMap(versionConfig []string) (map[string]*version.Version, error) {
m := map[string]*version.Version{}
for _, compVer := range versionConfig {
// default to "kube" of component is not specified
k := "kube"
v := compVer
if strings.Contains(compVer, "=") {
arr := strings.SplitN(compVer, "=", 2)
if len(arr) != 2 {
return m, fmt.Errorf("malformed pair, expect string=string")
}
k = strings.TrimSpace(arr[0])
v = strings.TrimSpace(arr[1])
}
ver, err := version.Parse(v)
if err != nil {
return m, err
}
if ver.Patch() != 0 {
return m, fmt.Errorf("patch version not allowed, got: %s=%s", k, ver.String())
}
if existingVer, ok := m[k]; ok {
return m, fmt.Errorf("duplicate version flag, %s=%s and %s=%s", k, existingVer.String(), k, ver.String())
}
m[k] = ver
}
return m, nil
}
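// toVersionMap example: []string{"kube=1.31", "wardle=1.2"} parses to {kube: 1.31, wardle: 1.2};
// a bare "1.31" defaults to the "kube" component; and "kube=1.31.2" is rejected because
// patch versions are not allowed.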
func (r *componentGlobalsRegistry) SetFallback() error {
r.mutex.Lock()
set := r.set
r.mutex.Unlock()
if set {
return nil
}
klog.Warning("setting componentGlobalsRegistry in SetFallback. We recommend calling componentGlobalsRegistry.Set()" +
" right after parsing flags to avoid using feature gates before their final values are set by the flags.")
return r.Set()
}
func (r *componentGlobalsRegistry) Set() error {
r.mutex.Lock()
defer r.mutex.Unlock()
r.set = true
emulationVersionConfigMap, err := toVersionMap(r.emulationVersionConfig)
if err != nil {
return err
}
for comp := range emulationVersionConfigMap {
if _, ok := r.componentGlobals[comp]; !ok {
return fmt.Errorf("component not registered: %s", comp)
}
// only components without any dependencies can be set from the flag.
if r.componentGlobals[comp].dependentEmulationVersion {
return fmt.Errorf("EmulationVersion of %s is set by mapping, cannot set it by flag", comp)
}
}
if emulationVersions, err := r.getFullEmulationVersionConfig(emulationVersionConfigMap); err != nil {
return err
} else {
for comp, ver := range emulationVersions {
r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver)
}
}
// Set feature gate emulation version before setting feature gate flag values.
for comp, globals := range r.componentGlobals {
if globals.featureGate == nil {
continue
}
klog.V(klogLevel).Infof("setting %s:feature gate emulation version to %s", comp, globals.effectiveVersion.EmulationVersion().String())
if err := globals.featureGate.SetEmulationVersion(globals.effectiveVersion.EmulationVersion()); err != nil {
return err
}
}
for comp, fg := range r.featureGatesConfig {
if comp == "" {
if _, ok := r.featureGatesConfig[DefaultKubeComponent]; ok {
return fmt.Errorf("set kube feature gates with default empty prefix or kube: prefix consistently, do not mix use")
}
comp = DefaultKubeComponent
}
if _, ok := r.componentGlobals[comp]; !ok {
return fmt.Errorf("component not registered: %s", comp)
}
featureGate := r.componentGlobals[comp].featureGate
if featureGate == nil {
return fmt.Errorf("component featureGate not registered: %s", comp)
}
flagVal := strings.Join(fg, ",")
klog.V(klogLevel).Infof("setting %s:feature-gates=%s", comp, flagVal)
if err := featureGate.Set(flagVal); err != nil {
return err
}
}
return nil
}
func (r *componentGlobalsRegistry) Validate() []error {
var errs []error
r.mutex.Lock()
defer r.mutex.Unlock()
for _, globals := range r.componentGlobals {
errs = append(errs, globals.effectiveVersion.Validate()...)
if globals.featureGate != nil {
errs = append(errs, globals.featureGate.Validate()...)
}
}
return errs
}
func (r *componentGlobalsRegistry) SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error {
if f == nil {
return nil
}
klog.V(klogLevel).Infof("setting EmulationVersion mapping from %s to %s", fromComponent, toComponent)
r.mutex.Lock()
defer r.mutex.Unlock()
if _, ok := r.componentGlobals[fromComponent]; !ok {
return fmt.Errorf("component not registered: %s", fromComponent)
}
if _, ok := r.componentGlobals[toComponent]; !ok {
return fmt.Errorf("component not registered: %s", toComponent)
}
// check multiple dependency
if r.componentGlobals[toComponent].dependentEmulationVersion {
return fmt.Errorf("mapping of %s already exists from another component", toComponent)
}
r.componentGlobals[toComponent].dependentEmulationVersion = true
versionMapping := r.componentGlobals[fromComponent].emulationVersionMapping
if _, ok := versionMapping[toComponent]; ok {
return fmt.Errorf("EmulationVersion from %s to %s already exists", fromComponent, toComponent)
}
versionMapping[toComponent] = f
klog.V(klogLevel).Infof("setting the default EmulationVersion of %s based on mapping from the default EmulationVersion of %s", fromComponent, toComponent)
defaultFromVersion := r.componentGlobals[fromComponent].effectiveVersion.EmulationVersion()
emulationVersions, err := r.getFullEmulationVersionConfig(map[string]*version.Version{fromComponent: defaultFromVersion})
if err != nil {
return err
}
for comp, ver := range emulationVersions {
r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver)
}
return nil
}

169
vendor/k8s.io/apiserver/pkg/util/version/version.go generated vendored Normal file
View File

@ -0,0 +1,169 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"fmt"
"sync/atomic"
"k8s.io/apimachinery/pkg/util/version"
baseversion "k8s.io/component-base/version"
)
type EffectiveVersion interface {
BinaryVersion() *version.Version
EmulationVersion() *version.Version
MinCompatibilityVersion() *version.Version
EqualTo(other EffectiveVersion) bool
String() string
Validate() []error
}
type MutableEffectiveVersion interface {
EffectiveVersion
Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version)
SetEmulationVersion(emulationVersion *version.Version)
SetMinCompatibilityVersion(minCompatibilityVersion *version.Version)
}
type effectiveVersion struct {
binaryVersion atomic.Pointer[version.Version]
// If the emulationVersion is set by the users, it could only contain major and minor versions.
// In tests, emulationVersion could be the same as the binary version, or set directly,
// which can have "alpha" as pre-release to continue serving expired apis while we clean up the test.
emulationVersion atomic.Pointer[version.Version]
// minCompatibilityVersion could only contain major and minor versions.
minCompatibilityVersion atomic.Pointer[version.Version]
}
func (m *effectiveVersion) BinaryVersion() *version.Version {
return m.binaryVersion.Load()
}
func (m *effectiveVersion) EmulationVersion() *version.Version {
ver := m.emulationVersion.Load()
if ver != nil {
// Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test.
// The pre-release should not be accessible to the users.
return ver.WithPreRelease(m.BinaryVersion().PreRelease())
}
return ver
}
func (m *effectiveVersion) MinCompatibilityVersion() *version.Version {
return m.minCompatibilityVersion.Load()
}
func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool {
return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion())
}
func (m *effectiveVersion) String() string {
if m == nil {
return "<nil>"
}
return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}",
m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String())
}
func majorMinor(ver *version.Version) *version.Version {
if ver == nil {
return ver
}
return version.MajorMinor(ver.Major(), ver.Minor())
}
func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) {
m.binaryVersion.Store(binaryVersion)
m.emulationVersion.Store(majorMinor(emulationVersion))
m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion))
}
func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) {
m.emulationVersion.Store(majorMinor(emulationVersion))
}
func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) {
m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion))
}
func (m *effectiveVersion) Validate() []error {
var errs []error
// Validate only checks the major and minor versions.
binaryVersion := m.binaryVersion.Load().WithPatch(0)
emulationVersion := m.emulationVersion.Load()
minCompatibilityVersion := m.minCompatibilityVersion.Load()
// emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}.
maxEmuVer := binaryVersion
minEmuVer := binaryVersion.SubtractMinor(1)
if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) {
errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String()))
}
// minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha.
maxCompVer := binaryVersion.SubtractMinor(1)
minCompVer := binaryVersion.SubtractMinor(1)
if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) {
errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String()))
}
return errs
}
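// Validate example: with a binary version of 1.31, the emulation version must fall within
// [1.30, 1.31] and the min compatibility version must be exactly 1.30.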
func newEffectiveVersion(binaryVersion *version.Version) MutableEffectiveVersion {
effective := &effectiveVersion{}
compatVersion := binaryVersion.SubtractMinor(1)
effective.Set(binaryVersion, binaryVersion, compatVersion)
return effective
}
func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion {
if binaryVer == "" {
return &effectiveVersion{}
}
binaryVersion := version.MustParse(binaryVer)
return newEffectiveVersion(binaryVersion)
}
// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the
// current build information.
func DefaultBuildEffectiveVersion() MutableEffectiveVersion {
verInfo := baseversion.Get()
binaryVersion := version.MustParse(verInfo.String()).WithInfo(verInfo)
if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 {
return DefaultKubeEffectiveVersion()
}
return newEffectiveVersion(binaryVersion)
}
// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the
// latest K8s release.
func DefaultKubeEffectiveVersion() MutableEffectiveVersion {
binaryVersion := version.MustParse(baseversion.DefaultKubeBinaryVersion).WithInfo(baseversion.Get())
return newEffectiveVersion(binaryVersion)
}
// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components.
// TODO: remove in 1.32
// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31.
func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error {
binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor())
if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) {
return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s",
binaryVersion.String(), effectiveVersion.EmulationVersion().String())
}
return nil
}

View File

@ -23,8 +23,18 @@ import (
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/transport"
)
func ValidateCABundle(fldPath *field.Path, caBundle []byte) field.ErrorList {
var allErrors field.ErrorList
_, err := transport.TLSConfigFor(&transport.Config{TLS: transport.TLSConfig{CAData: caBundle}})
if err != nil {
allErrors = append(allErrors, field.Invalid(fldPath, caBundle, err.Error()))
}
return allErrors
}
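// ValidateCABundle usage sketch; the field path and caPEM variable are illustrative:
//
//	if errs := ValidateCABundle(field.NewPath("clientConfig", "caBundle"), caPEM); len(errs) > 0 {
//		// reject the webhook configuration
//	}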
// ValidateWebhookURL validates webhook's URL.
func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList {
var allErrors field.ErrorList