rebase: update kubernetes to 1.28.0 in main

Updating Kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2023-08-17 07:15:28 +02:00
Committed by: mergify[bot]
Parent: b2fdc269c3
Commit: ff3e84ad67
706 changed files with 45252 additions and 16346 deletions

View File

@ -65,6 +65,7 @@ import (
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/routes"
serverstore "k8s.io/apiserver/pkg/server/storage"
storagevalue "k8s.io/apiserver/pkg/storage/value"
"k8s.io/apiserver/pkg/storageversion"
utilfeature "k8s.io/apiserver/pkg/util/feature"
utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
@ -85,6 +86,13 @@ import (
_ "k8s.io/apiserver/pkg/apis/apiserver/install"
)
// hostnameFunc returns the hostname of this apiserver.
// It may be overridden for testing purposes only, to simulate scenarios where multiple apiservers
// exist. In such cases we want to ensure unique apiserver IDs, which are a hash of the hostname.
var (
hostnameFunc = os.Hostname
)
const (
// DefaultLegacyAPIPrefix is where the legacy APIs will be located.
DefaultLegacyAPIPrefix = "/api"
@ -190,6 +198,8 @@ type Config struct {
// SkipOpenAPIInstallation avoids installing the OpenAPI handler if set to true.
SkipOpenAPIInstallation bool
// ResourceTransformers are used to transform resources from and to etcd, e.g. encryption.
ResourceTransformers storagevalue.ResourceTransformers
// RESTOptionsGetter is used to construct RESTStorage types via the generic registry.
RESTOptionsGetter genericregistry.RESTOptionsGetter
@ -364,7 +374,7 @@ func NewConfig(codecs serializer.CodecFactory) *Config {
defaultHealthChecks := []healthz.HealthChecker{healthz.PingHealthz, healthz.LogHealthz}
var id string
if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIServerIdentity) {
hostname, err := os.Hostname()
hostname, err := hostnameFunc()
if err != nil {
klog.Fatalf("error getting hostname for apiserver identity: %v", err)
}
@ -894,14 +904,16 @@ func BuildHandlerChainWithStorageVersionPrecondition(apiHandler http.Handler, c
}
func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler {
handler := filterlatency.TrackCompleted(apiHandler)
handler := apiHandler
handler = filterlatency.TrackCompleted(handler)
handler = genericapifilters.WithAuthorization(handler, c.Authorization.Authorizer, c.Serializer)
handler = filterlatency.TrackStarted(handler, c.TracerProvider, "authorization")
if c.FlowControl != nil {
workEstimatorCfg := flowcontrolrequest.DefaultWorkEstimatorConfig()
requestWorkEstimator := flowcontrolrequest.NewWorkEstimator(
c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg)
c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg, c.FlowControl.GetMaxSeats)
handler = filterlatency.TrackCompleted(handler)
handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator)
handler = filterlatency.TrackStarted(handler, c.TracerProvider, "priorityandfairness")
@ -1067,3 +1079,12 @@ func AuthorizeClientBearerToken(loopback *restclient.Config, authn *Authenticati
tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens, authn.APIAudiences)
authn.Authenticator = authenticatorunion.New(tokenAuthenticator, authn.Authenticator)
}
// For testing purposes only
func SetHostnameFuncForTests(name string) {
hostnameFunc = func() (host string, err error) {
host = name
err = nil
return
}
}
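
A minimal sketch of how a test might use this hook; the test name and the assertions are hypothetical, not part of this change:

package server

import "testing"

func TestUniqueAPIServerIdentity(t *testing.T) {
	// Pretend this process is a different host, so the derived
	// apiserver ID (a hash of the hostname) differs per "server".
	SetHostnameFuncForTests("apiserver-1")
	// ... construct a Config via NewConfig and assert on the resulting identity ...
}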

View File

@ -34,7 +34,6 @@ import (
const (
// Constant for the retry-after interval on rate limiting.
// TODO: maybe make this dynamic? or user-adjustable?
retryAfter = "1"
// How often inflight usage metric should be updated. Because
@ -210,7 +209,7 @@ func WithMaxInFlightLimit(
// We need to split this data between buckets used for throttling.
metrics.RecordDroppedRequest(r, requestInfo, metrics.APIServerComponent, isMutatingRequest)
metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests)
tooManyRequests(r, w)
tooManyRequests(r, w, retryAfter)
}
}
})
@ -221,9 +220,3 @@ func WithMaxInFlightLimit(
func StartMaxInFlightWatermarkMaintenance(stopCh <-chan struct{}) {
startWatermarkMaintenance(watermark, stopCh)
}
func tooManyRequests(req *http.Request, w http.ResponseWriter) {
// Return a 429 status indicating "Too Many Requests"
w.Header().Set("Retry-After", retryAfter)
http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests)
}

View File

@ -21,6 +21,7 @@ import (
"fmt"
"net/http"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
@ -67,6 +68,240 @@ func truncateLogField(s string) string {
var initAPFOnce sync.Once
type priorityAndFairnessHandler struct {
handler http.Handler
longRunningRequestCheck apirequest.LongRunningRequestCheck
fcIfc utilflowcontrol.Interface
workEstimator flowcontrolrequest.WorkEstimatorFunc
// droppedRequests tracks the history of dropped requests for
// the purpose of computing RetryAfter header to avoid system
// overload.
droppedRequests utilflowcontrol.DroppedRequestsTracker
}
func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if !ok {
handleError(w, r, fmt.Errorf("no RequestInfo found in context"))
return
}
user, ok := apirequest.UserFrom(ctx)
if !ok {
handleError(w, r, fmt.Errorf("no User found in context"))
return
}
isWatchRequest := watchVerbs.Has(requestInfo.Verb)
// Skip tracking long running non-watch requests.
if h.longRunningRequestCheck != nil && h.longRunningRequestCheck(r, requestInfo) && !isWatchRequest {
klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user)
h.handler.ServeHTTP(w, r)
return
}
var classification *PriorityAndFairnessClassification
noteFn := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) {
classification = &PriorityAndFairnessClassification{
FlowSchemaName: fs.Name,
FlowSchemaUID: fs.UID,
PriorityLevelName: pl.Name,
PriorityLevelUID: pl.UID,
}
httplog.AddKeyValue(ctx, "apf_pl", truncateLogField(pl.Name))
httplog.AddKeyValue(ctx, "apf_fs", truncateLogField(fs.Name))
}
// estimateWork is called, if at all, after noteFn
estimateWork := func() flowcontrolrequest.WorkEstimate {
if classification == nil {
// workEstimator is being invoked before classification of
// the request has completed; we should never get here.
klog.ErrorS(fmt.Errorf("workEstimator is being invoked before classification of the request has completed"),
"Using empty FlowSchema and PriorityLevelConfiguration name", "verb", r.Method, "URI", r.RequestURI)
return h.workEstimator(r, "", "")
}
workEstimate := h.workEstimator(r, classification.FlowSchemaName, classification.PriorityLevelName)
fcmetrics.ObserveWorkEstimatedSeats(classification.PriorityLevelName, classification.FlowSchemaName, workEstimate.MaxSeats())
httplog.AddKeyValue(ctx, "apf_iseats", workEstimate.InitialSeats)
httplog.AddKeyValue(ctx, "apf_fseats", workEstimate.FinalSeats)
httplog.AddKeyValue(ctx, "apf_additionalLatency", workEstimate.AdditionalLatency)
return workEstimate
}
var served bool
isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb)
noteExecutingDelta := func(delta int32) {
if isMutatingRequest {
watermark.recordMutating(int(atomic.AddInt32(&atomicMutatingExecuting, delta)))
} else {
watermark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyExecuting, delta)))
}
}
noteWaitingDelta := func(delta int32) {
if isMutatingRequest {
waitingMark.recordMutating(int(atomic.AddInt32(&atomicMutatingWaiting, delta)))
} else {
waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta)))
}
}
queueNote := func(inQueue bool) {
if inQueue {
noteWaitingDelta(1)
} else {
noteWaitingDelta(-1)
}
}
digest := utilflowcontrol.RequestDigest{
RequestInfo: requestInfo,
User: user,
}
if isWatchRequest {
// This channel blocks calling handler.ServeHTTP() until closed, and is closed inside execute().
// If APF rejects the request, it is never closed.
shouldStartWatchCh := make(chan struct{})
watchInitializationSignal := newInitializationSignal()
// This wraps the request passed to handler.ServeHTTP(),
// setting a context that plumbs watchInitializationSignal to storage
var watchReq *http.Request
// This is set inside execute(), prior to closing shouldStartWatchCh.
// If the request is rejected by APF it is left nil.
var forgetWatch utilflowcontrol.ForgetWatchFunc
defer func() {
// Protect against the situation where the request never reaches the storage layer
// and the initialization signal is never sent.
if watchInitializationSignal != nil {
watchInitializationSignal.Signal()
}
// Forget the watcher if it was registered.
//
// This is race-free because by this point, one of the following occurred:
// case <-shouldStartWatchCh: execute() completed the assignment to forgetWatch
// case <-resultCh: Handle() completed, and Handle() does not return
// while execute() is running
if forgetWatch != nil {
forgetWatch()
}
}()
execute := func() {
startedAt := time.Now()
defer func() {
httplog.AddKeyValue(ctx, "apf_init_latency", time.Since(startedAt))
}()
noteExecutingDelta(1)
defer noteExecutingDelta(-1)
served = true
setResponseHeaders(classification, w)
forgetWatch = h.fcIfc.RegisterWatch(r)
// Notify the main thread that we're ready to start the watch.
close(shouldStartWatchCh)
// Wait until the request is finished from the APF point of view
// (which is when its initialization is done).
watchInitializationSignal.Wait()
}
// Ensure that an item can be put to resultCh asynchronously.
resultCh := make(chan interface{}, 1)
// Call Handle in a separate goroutine.
// The reason for it is that from APF point of view, the request processing
// finishes as soon as watch is initialized (which is generally orders of
// magnitude faster than the watch request itself). This means that Handle()
// call finishes much faster and for performance reasons we want to reduce
// the number of running goroutines - so we run the shorter thing in a
// dedicated goroutine and the actual watch handler in the main one.
go func() {
defer func() {
err := recover()
// do not wrap the sentinel ErrAbortHandler panic value
if err != nil && err != http.ErrAbortHandler {
// Same as stdlib http server code. Manually allocate stack
// trace buffer size to prevent excessively large logs
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
err = fmt.Sprintf("%v\n%s", err, buf)
}
// Ensure that the result is put into resultCh independently of the panic.
resultCh <- err
}()
// We create handleCtx with an explicit cancellation function.
// The reason for it is that Handle() underneath may start an additional goroutine
// that is blocked on context cancellation. However, from the APF point of view,
// we don't want to wait until the whole watch request is processed (which is
// when its context is actually cancelled) - we want to unblock the goroutine as
// soon as the request is processed from the APF point of view.
//
// Note that we explicitly do NOT call the actual handler using that context
// to avoid cancelling the request too early.
handleCtx, handleCtxCancel := context.WithCancel(ctx)
defer handleCtxCancel()
// Note that Handle will return irrespective of whether the request
// executes or is rejected. In the latter case, the function will return
// without calling the passed `execute` function.
h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute)
}()
select {
case <-shouldStartWatchCh:
watchCtx := utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal)
watchReq = r.WithContext(watchCtx)
h.handler.ServeHTTP(w, watchReq)
// Protect against the situation where the request never reaches the storage layer
// and the initialization signal is never sent.
// It has to happen before waiting on the resultCh below.
watchInitializationSignal.Signal()
// TODO: Consider finishing the request as soon as Handle call panics.
if err := <-resultCh; err != nil {
panic(err)
}
case err := <-resultCh:
if err != nil {
panic(err)
}
}
} else {
execute := func() {
noteExecutingDelta(1)
defer noteExecutingDelta(-1)
served = true
setResponseHeaders(classification, w)
h.handler.ServeHTTP(w, r)
}
h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute)
}
if !served {
setResponseHeaders(classification, w)
epmetrics.RecordDroppedRequest(r, requestInfo, epmetrics.APIServerComponent, isMutatingRequest)
epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests)
h.droppedRequests.RecordDroppedRequest(classification.PriorityLevelName)
// TODO(wojtek-t): Idea from deads2k: we can consider some jittering and in case of non-int
// number, just return the truncated result and sleep the remainder server-side.
tooManyRequests(r, w, strconv.Itoa(int(h.droppedRequests.GetRetryAfter(classification.PriorityLevelName))))
}
}
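
The channel choreography in the watch branch above is subtle. Here is a stand-alone sketch of the same pattern (buffered result channel, admission in a goroutine, select on start-vs-reject), with the APF admission call replaced by a stub:

package main

import (
	"fmt"
	"time"
)

func main() {
	shouldStartWatchCh := make(chan struct{})
	resultCh := make(chan error, 1) // buffered: the goroutine never blocks on send
	go func() {
		// Stub for fcIfc.Handle: admit the request, then report completion.
		time.Sleep(10 * time.Millisecond)
		close(shouldStartWatchCh) // request admitted; the watch may start
		resultCh <- nil           // Handle returned without panicking
	}()
	select {
	case <-shouldStartWatchCh:
		fmt.Println("serving watch")
		if err := <-resultCh; err != nil { // wait for Handle to finish
			panic(err)
		}
	case err := <-resultCh: // rejected before the watch ever started
		fmt.Println("rejected:", err)
	}
}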
// WithPriorityAndFairness limits the number of in-flight
// requests in a fine-grained way.
func WithPriorityAndFairness(
@ -86,223 +321,15 @@ func WithPriorityAndFairness(
waitingMark.readOnlyObserver = fcmetrics.GetWaitingReadonlyConcurrency()
waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency()
})
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
if !ok {
handleError(w, r, fmt.Errorf("no RequestInfo found in context"))
return
}
user, ok := apirequest.UserFrom(ctx)
if !ok {
handleError(w, r, fmt.Errorf("no User found in context"))
return
}
isWatchRequest := watchVerbs.Has(requestInfo.Verb)
// Skip tracking long running non-watch requests.
if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) && !isWatchRequest {
klog.V(6).Infof("Serving RequestInfo=%#+v, user.Info=%#+v as longrunning\n", requestInfo, user)
handler.ServeHTTP(w, r)
return
}
var classification *PriorityAndFairnessClassification
noteFn := func(fs *flowcontrol.FlowSchema, pl *flowcontrol.PriorityLevelConfiguration, flowDistinguisher string) {
classification = &PriorityAndFairnessClassification{
FlowSchemaName: fs.Name,
FlowSchemaUID: fs.UID,
PriorityLevelName: pl.Name,
PriorityLevelUID: pl.UID}
httplog.AddKeyValue(ctx, "apf_pl", truncateLogField(pl.Name))
httplog.AddKeyValue(ctx, "apf_fs", truncateLogField(fs.Name))
}
// estimateWork is called, if at all, after noteFn
estimateWork := func() flowcontrolrequest.WorkEstimate {
if classification == nil {
// workEstimator is being invoked before classification of
// the request has completed; we should never get here.
klog.ErrorS(fmt.Errorf("workEstimator is being invoked before classification of the request has completed"),
"Using empty FlowSchema and PriorityLevelConfiguration name", "verb", r.Method, "URI", r.RequestURI)
return workEstimator(r, "", "")
}
workEstimate := workEstimator(r, classification.FlowSchemaName, classification.PriorityLevelName)
fcmetrics.ObserveWorkEstimatedSeats(classification.PriorityLevelName, classification.FlowSchemaName, workEstimate.MaxSeats())
httplog.AddKeyValue(ctx, "apf_iseats", workEstimate.InitialSeats)
httplog.AddKeyValue(ctx, "apf_fseats", workEstimate.FinalSeats)
httplog.AddKeyValue(ctx, "apf_additionalLatency", workEstimate.AdditionalLatency)
return workEstimate
}
var served bool
isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb)
noteExecutingDelta := func(delta int32) {
if isMutatingRequest {
watermark.recordMutating(int(atomic.AddInt32(&atomicMutatingExecuting, delta)))
} else {
watermark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyExecuting, delta)))
}
}
noteWaitingDelta := func(delta int32) {
if isMutatingRequest {
waitingMark.recordMutating(int(atomic.AddInt32(&atomicMutatingWaiting, delta)))
} else {
waitingMark.recordReadOnly(int(atomic.AddInt32(&atomicReadOnlyWaiting, delta)))
}
}
queueNote := func(inQueue bool) {
if inQueue {
noteWaitingDelta(1)
} else {
noteWaitingDelta(-1)
}
}
digest := utilflowcontrol.RequestDigest{
RequestInfo: requestInfo,
User: user,
}
if isWatchRequest {
// This channel blocks calling handler.ServeHTTP() until closed, and is closed inside execute().
// If APF rejects the request, it is never closed.
shouldStartWatchCh := make(chan struct{})
watchInitializationSignal := newInitializationSignal()
// This wraps the request passed to handler.ServeHTTP(),
// setting a context that plumbs watchInitializationSignal to storage
var watchReq *http.Request
// This is set inside execute(), prior to closing shouldStartWatchCh.
// If the request is rejected by APF it is left nil.
var forgetWatch utilflowcontrol.ForgetWatchFunc
defer func() {
// Protect against the situation where the request never reaches the storage layer
// and the initialization signal is never sent.
if watchInitializationSignal != nil {
watchInitializationSignal.Signal()
}
// Forget the watcher if it was registered.
//
// This is race-free because by this point, one of the following occurred:
// case <-shouldStartWatchCh: execute() completed the assignment to forgetWatch
// case <-resultCh: Handle() completed, and Handle() does not return
// while execute() is running
if forgetWatch != nil {
forgetWatch()
}
}()
execute := func() {
startedAt := time.Now()
defer func() {
httplog.AddKeyValue(ctx, "apf_init_latency", time.Since(startedAt))
}()
noteExecutingDelta(1)
defer noteExecutingDelta(-1)
served = true
setResponseHeaders(classification, w)
forgetWatch = fcIfc.RegisterWatch(r)
// Notify the main thread that we're ready to start the watch.
close(shouldStartWatchCh)
// Wait until the request is finished from the APF point of view
// (which is when its initialization is done).
watchInitializationSignal.Wait()
}
// Ensure that an item can be put to resultCh asynchronously.
resultCh := make(chan interface{}, 1)
// Call Handle in a separate goroutine.
// The reason for it is that from APF point of view, the request processing
// finishes as soon as watch is initialized (which is generally orders of
// magnitude faster than the watch request itself). This means that Handle()
// call finishes much faster and for performance reasons we want to reduce
// the number of running goroutines - so we run the shorter thing in a
// dedicated goroutine and the actual watch handler in the main one.
go func() {
defer func() {
err := recover()
// do not wrap the sentinel ErrAbortHandler panic value
if err != nil && err != http.ErrAbortHandler {
// Same as stdlib http server code. Manually allocate stack
// trace buffer size to prevent excessively large logs
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
err = fmt.Sprintf("%v\n%s", err, buf)
}
// Ensure that the result is put into resultCh independently of the panic.
resultCh <- err
}()
// We create handleCtx with an explicit cancellation function.
// The reason for it is that Handle() underneath may start an additional goroutine
// that is blocked on context cancellation. However, from the APF point of view,
// we don't want to wait until the whole watch request is processed (which is
// when its context is actually cancelled) - we want to unblock the goroutine as
// soon as the request is processed from the APF point of view.
//
// Note that we explicitly do NOT call the actual handler using that context
// to avoid cancelling the request too early.
handleCtx, handleCtxCancel := context.WithCancel(ctx)
defer handleCtxCancel()
// Note that Handle will return irrespective of whether the request
// executes or is rejected. In the latter case, the function will return
// without calling the passed `execute` function.
fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute)
}()
select {
case <-shouldStartWatchCh:
watchCtx := utilflowcontrol.WithInitializationSignal(ctx, watchInitializationSignal)
watchReq = r.WithContext(watchCtx)
handler.ServeHTTP(w, watchReq)
// Protect against the situation where the request never reaches the storage layer
// and the initialization signal is never sent.
// It has to happen before waiting on the resultCh below.
watchInitializationSignal.Signal()
// TODO: Consider finishing the request as soon as Handle call panics.
if err := <-resultCh; err != nil {
panic(err)
}
case err := <-resultCh:
if err != nil {
panic(err)
}
}
} else {
execute := func() {
noteExecutingDelta(1)
defer noteExecutingDelta(-1)
served = true
setResponseHeaders(classification, w)
handler.ServeHTTP(w, r)
}
fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute)
}
if !served {
setResponseHeaders(classification, w)
epmetrics.RecordDroppedRequest(r, requestInfo, epmetrics.APIServerComponent, isMutatingRequest)
epmetrics.RecordRequestTermination(r, requestInfo, epmetrics.APIServerComponent, http.StatusTooManyRequests)
tooManyRequests(r, w)
}
})
priorityAndFairnessHandler := &priorityAndFairnessHandler{
handler: handler,
longRunningRequestCheck: longRunningRequestCheck,
fcIfc: fcIfc,
workEstimator: workEstimator,
droppedRequests: utilflowcontrol.NewDroppedRequestsTracker(),
}
return http.HandlerFunc(priorityAndFairnessHandler.Handle)
}
// StartPriorityAndFairnessWatermarkMaintenance starts the goroutines to observe and maintain watermarks for
@ -323,3 +350,9 @@ func setResponseHeaders(classification *PriorityAndFairnessClassification, w htt
w.Header().Set(flowcontrol.ResponseHeaderMatchedPriorityLevelConfigurationUID, string(classification.PriorityLevelUID))
w.Header().Set(flowcontrol.ResponseHeaderMatchedFlowSchemaUID, string(classification.FlowSchemaUID))
}
func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string) {
// Return a 429 status indicating "Too Many Requests"
w.Header().Set("Retry-After", retryAfter)
http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests)
}
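
For reference, clients are expected to honor the Retry-After header set here, which is expressed in whole seconds. A minimal client-side sketch (the retry loop and fallback value are illustrative, not part of this change):

package retryclient

import (
	"net/http"
	"strconv"
	"time"
)

// getWithRetry retries a GET whenever the server answers 429,
// sleeping for the number of seconds advertised in Retry-After.
func getWithRetry(client *http.Client, url string) (*http.Response, error) {
	for {
		resp, err := client.Get(url)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusTooManyRequests {
			return resp, nil
		}
		resp.Body.Close()
		secs, err := strconv.Atoi(resp.Header.Get("Retry-After"))
		if err != nil {
			secs = 1 // missing or malformed header: conservative fallback
		}
		time.Sleep(time.Duration(secs) * time.Second)
	}
}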

View File

@ -18,6 +18,7 @@ package server
import (
"context"
"errors"
"fmt"
"net/http"
gpath "path"
@ -736,16 +737,7 @@ func (s preparedGenericAPIServer) NonBlockingRun(stopCh <-chan struct{}, shutdow
}
// installAPIResources is a private method for installing the REST storage backing each api groupversionresource
func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, openAPIModels map[string]*spec.Schema) error {
var typeConverter managedfields.TypeConverter
if len(openAPIModels) > 0 {
var err error
typeConverter, err = managedfields.NewTypeConverter(openAPIModels, false)
if err != nil {
return err
}
}
func (s *GenericAPIServer) installAPIResources(apiPrefix string, apiGroupInfo *APIGroupInfo, typeConverter managedfields.TypeConverter) error {
var resourceInfos []*storageversion.ResourceInfo
for _, groupVersion := range apiGroupInfo.PrioritizedVersions {
if len(apiGroupInfo.VersionedResourcesStorageMap[groupVersion.Version]) == 0 {
@ -844,6 +836,9 @@ func (s *GenericAPIServer) InstallLegacyAPIGroup(apiPrefix string, apiGroupInfo
// underlying storage will be destroyed on this server's shutdown.
func (s *GenericAPIServer) InstallAPIGroups(apiGroupInfos ...*APIGroupInfo) error {
for _, apiGroupInfo := range apiGroupInfos {
if len(apiGroupInfo.PrioritizedVersions) == 0 {
return fmt.Errorf("no version priority set for %#v", *apiGroupInfo)
}
// Do not register empty group or empty version. Doing so claims /apis/ for the wrong entity to be returned.
// Catching these here places the error much closer to its origin
if len(apiGroupInfo.PrioritizedVersions[0].Group) == 0 {
@ -916,9 +911,22 @@ func (s *GenericAPIServer) getAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupV
}
func (s *GenericAPIServer) newAPIGroupVersion(apiGroupInfo *APIGroupInfo, groupVersion schema.GroupVersion) *genericapi.APIGroupVersion {
allServedVersionsByResource := map[string][]string{}
for version, resourcesInVersion := range apiGroupInfo.VersionedResourcesStorageMap {
for resource := range resourcesInVersion {
if len(groupVersion.Group) == 0 {
allServedVersionsByResource[resource] = append(allServedVersionsByResource[resource], version)
} else {
allServedVersionsByResource[resource] = append(allServedVersionsByResource[resource], fmt.Sprintf("%s/%s", groupVersion.Group, version))
}
}
}
return &genericapi.APIGroupVersion{
GroupVersion: groupVersion,
MetaGroupVersion: apiGroupInfo.MetaGroupVersion,
GroupVersion: groupVersion,
AllServedVersionsByResource: allServedVersionsByResource,
MetaGroupVersion: apiGroupInfo.MetaGroupVersion,
ParameterCodec: apiGroupInfo.ParameterCodec,
Serializer: apiGroupInfo.NegotiatedSerializer,
@ -953,13 +961,13 @@ func NewDefaultAPIGroupInfo(group string, scheme *runtime.Scheme, parameterCodec
}
// getOpenAPIModels is a private method for getting the OpenAPI models
func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (map[string]*spec.Schema, error) {
func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*APIGroupInfo) (managedfields.TypeConverter, error) {
if s.openAPIV3Config == nil {
//!TODO: Future work should add a requirement that
// OpenAPIV3 config is required. May require some refactoring of tests.
return nil, nil
// SSA is GA and requires OpenAPI config to be set
// to create models.
return nil, errors.New("OpenAPIV3 config must not be nil")
}
pathsToIgnore := openapiutil.NewTrie(s.openAPIConfig.IgnorePrefixes)
pathsToIgnore := openapiutil.NewTrie(s.openAPIV3Config.IgnorePrefixes)
resourceNames := make([]string, 0)
for _, apiGroupInfo := range apiGroupInfos {
groupResources, err := getResourceNamesForGroup(apiPrefix, apiGroupInfo, pathsToIgnore)
@ -977,7 +985,13 @@ func (s *GenericAPIServer) getOpenAPIModels(apiPrefix string, apiGroupInfos ...*
for _, apiGroupInfo := range apiGroupInfos {
apiGroupInfo.StaticOpenAPISpec = openAPISpec
}
return openAPISpec, nil
typeConverter, err := managedfields.NewTypeConverter(openAPISpec, false)
if err != nil {
return nil, err
}
return typeConverter, nil
}
// getResourceNamesForGroup is a private method for getting the canonical names for each resource to build in an api group
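
The allServedVersionsByResource bookkeeping added in newAPIGroupVersion above reduces to a small map computation. A distilled, runnable sketch with hypothetical data:

package main

import "fmt"

func main() {
	group := "apps"
	// version -> resources served at that version, as in VersionedResourcesStorageMap
	storage := map[string][]string{
		"v1":      {"deployments", "statefulsets"},
		"v1beta2": {"deployments"},
	}
	allServedVersionsByResource := map[string][]string{}
	for version, resources := range storage {
		for _, resource := range resources {
			if group == "" {
				// legacy (core) group: bare version
				allServedVersionsByResource[resource] = append(allServedVersionsByResource[resource], version)
			} else {
				allServedVersionsByResource[resource] = append(allServedVersionsByResource[resource], group+"/"+version)
			}
		}
	}
	fmt.Println(allServedVersionsByResource)
	// deployments -> [apps/v1 apps/v1beta2], statefulsets -> [apps/v1] (map order varies)
}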

View File

@ -53,13 +53,13 @@ type APIServerHandler struct {
// Director is here so that we can properly handle fall through and proxy cases.
// This looks a bit bonkers, but here's what's happening. We need to have /apis handling registered in gorestful in order to have
// swagger generated for compatibility. Doing that with `/apis` as a webservice, means that it forcibly 404s (no defaulting allowed)
// all requests which are not /apis or /apis/. We need those calls to fall through behind goresful for proper delegation. Trying to
// all requests which are not /apis or /apis/. We need those calls to fall through behind gorestful for proper delegation. Trying to
// register for a pattern which includes everything behind it doesn't work because gorestful negotiates for verbs and content encoding
// and all those things go crazy when gorestful really just needs to pass through. In addition, openapi enforces unique verb constraints
// which we don't fit into and it still muddies up swagger. Trying to switch the webservices into a route doesn't work because the
// containing webservice faces all the same problems listed above.
// This leads to the crazy thing done here. Our mux does what we need, so we'll place it in front of gorestful. It will introspect to
// decide if the route is likely to be handled by goresful and route there if needed. Otherwise, it goes to NonGoRestfulMux mux in
// decide if the route is likely to be handled by gorestful and route there if needed. Otherwise, it goes to NonGoRestfulMux mux in
// order to handle "normal" paths and delegation. Hopefully no API consumers will ever have to deal with this level of detail. I think
// we should consider completely removing gorestful.
// Other servers should only use this opaquely to delegate to an API server.

View File

@ -1,5 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- enj
reviewers:
- smarterclayton
- wojtek-t

View File

@ -39,7 +39,6 @@ import (
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/component-base/featuregate"
)
@ -123,7 +122,8 @@ func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) {
func (a *AdmissionOptions) ApplyTo(
c *server.Config,
informers informers.SharedInformerFactory,
kubeAPIServerClientConfig *rest.Config,
kubeClient kubernetes.Interface,
dynamicClient dynamic.Interface,
features featuregate.FeatureGate,
pluginInitializers ...admission.PluginInitializer,
) error {
@ -143,15 +143,8 @@ func (a *AdmissionOptions) ApplyTo(
return fmt.Errorf("failed to read plugin config: %v", err)
}
clientset, err := kubernetes.NewForConfig(kubeAPIServerClientConfig)
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(kubeAPIServerClientConfig)
if err != nil {
return err
}
genericInitializer := initializer.New(clientset, dynamicClient, informers, c.Authorization.Authorizer, features, c.DrainedNotify())
genericInitializer := initializer.New(kubeClient, dynamicClient, informers, c.Authorization.Authorizer, features,
c.DrainedNotify())
initializersChain := admission.PluginInitializers{genericInitializer}
initializersChain = append(initializersChain, pluginInitializers...)
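
With this change, ApplyTo no longer builds clients from a rest.Config; callers construct them and pass them in. A minimal caller-side sketch (the helper name and parameters are assumptions standing in for the surrounding setup code):

package example

import (
	"k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/options"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/component-base/featuregate"
)

func applyAdmission(a *options.AdmissionOptions, c *server.Config, inf informers.SharedInformerFactory,
	clientConfig *rest.Config, features featuregate.FeatureGate) error {
	// The caller now owns client construction, mirroring the code removed above.
	kubeClient, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		return err
	}
	dynamicClient, err := dynamic.NewForConfig(clientConfig)
	if err != nil {
		return err
	}
	return a.ApplyTo(c, inf, kubeClient, dynamicClient, features)
}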

View File

@ -142,16 +142,6 @@ type AuditWebhookOptions struct {
GroupVersionString string
}
// AuditDynamicOptions control the configuration of dynamic backends for audit events
type AuditDynamicOptions struct {
// Enabled tells whether the dynamic audit capability is enabled.
Enabled bool
// Configuration for batching backend. This is currently only used as an override
// for integration tests
BatchConfig *pluginbuffered.BatchConfig
}
func NewAuditOptions() *AuditOptions {
return &AuditOptions{
WebhookOptions: AuditWebhookOptions{

View File

@ -67,7 +67,7 @@ func (s *DeprecatedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
}
fs.IPVar(&s.BindAddress, "insecure-bind-address", s.BindAddress, ""+
"The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening in all interfaces and IP families).")
"The IP address on which to serve the --insecure-port (set to 0.0.0.0 or :: for listening on all interfaces and IP address families).")
// Though this flag is deprecated, we discovered security concerns over how to do health checks without it e.g. #43784
fs.MarkDeprecated("insecure-bind-address", "This flag will be removed in a future version.")
fs.Lookup("insecure-bind-address").Hidden = false
@ -86,7 +86,7 @@ func (s *DeprecatedInsecureServingOptions) AddUnqualifiedFlags(fs *pflag.FlagSet
}
fs.IPVar(&s.BindAddress, "address", s.BindAddress,
"The IP address on which to serve the insecure --port (set to '0.0.0.0' or '::' for listening in all interfaces and IP families).")
"The IP address on which to serve the insecure --port (set to '0.0.0.0' or '::' for listening on all interfaces and IP address families).")
fs.MarkDeprecated("address", "see --bind-address instead.")
fs.Lookup("address").Hidden = false

View File

@ -43,10 +43,11 @@ import (
"k8s.io/apiserver/pkg/apis/config/validation"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/storage/value"
storagevalue "k8s.io/apiserver/pkg/storage/value"
aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
"k8s.io/apiserver/pkg/storage/value/encrypt/envelope"
envelopekmsv2 "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2"
kmstypes "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2"
"k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics"
"k8s.io/apiserver/pkg/storage/value/encrypt/identity"
"k8s.io/apiserver/pkg/storage/value/encrypt/secretbox"
@ -63,13 +64,13 @@ const (
kmsTransformerPrefixV2 = "k8s:enc:kms:v2:"
// these constants relate to how the KMS v2 plugin status poll logic
// and the DEK generation logic behave. In particular, the positive
// and the DEK/seed generation logic behave. In particular, the positive
// interval and max TTL are closely related as the difference between
// these values defines the worst case window in which the write DEK
// these values defines the worst case window in which the write DEK/seed
// could expire due to the plugin going into an error state. The
// worst case window divided by the negative interval defines the
// minimum amount of times the server will attempt to return to a
// healthy state before the DEK expires and writes begin to fail.
// healthy state before the DEK/seed expires and writes begin to fail.
//
// For now, these values are kept small and hardcoded to support being
// able to perform a "passive" storage migration while tolerating some
@ -82,13 +83,13 @@ const (
// At that point, they are guaranteed to either migrate to the new key
// or get errors during the migration.
//
// If the API server coasted forever on the last DEK, they would need
// If the API server coasted forever on the last DEK/seed, they would need
// to actively check if it had observed the new key ID before starting
// a migration - otherwise it could keep using the old DEK and their
// a migration - otherwise it could keep using the old DEK/seed and their
// storage migration would not do what they thought it did.
kmsv2PluginHealthzPositiveInterval = 1 * time.Minute
kmsv2PluginHealthzNegativeInterval = 10 * time.Second
kmsv2PluginWriteDEKMaxTTL = 3 * time.Minute
kmsv2PluginWriteDEKSourceMaxTTL = 3 * time.Minute
kmsPluginHealthzNegativeTTL = 3 * time.Second
kmsPluginHealthzPositiveTTL = 20 * time.Second
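
Plugging in the constants above gives concrete numbers for the coast window described in the comment; a small sketch of the arithmetic (variable names mirror the constants):

package main

import (
	"fmt"
	"time"
)

func main() {
	positiveInterval := 1 * time.Minute      // kmsv2PluginHealthzPositiveInterval
	negativeInterval := 10 * time.Second     // kmsv2PluginHealthzNegativeInterval
	writeDEKSourceMaxTTL := 3 * time.Minute  // kmsv2PluginWriteDEKSourceMaxTTL

	// worst-case window in which the write DEK/seed could expire
	worstCaseWindow := writeDEKSourceMaxTTL - positiveInterval
	fmt.Println(worstCaseWindow) // 2m0s

	// minimum number of recovery attempts before writes begin to fail
	fmt.Println(int64(worstCaseWindow / negativeInterval)) // 12
}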
@ -159,7 +160,7 @@ func (h *kmsv2PluginProbe) toHealthzCheck(idx int) healthz.HealthChecker {
// EncryptionConfiguration represents the parsed and normalized encryption configuration for the apiserver.
type EncryptionConfiguration struct {
// Transformers is a list of value.Transformer that will be used to encrypt and decrypt data.
Transformers map[schema.GroupResource]value.Transformer
Transformers map[schema.GroupResource]storagevalue.Transformer
// HealthChecks is a list of healthz.HealthChecker that will be used to check the health of the encryption providers.
HealthChecks []healthz.HealthChecker
@ -207,7 +208,7 @@ func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool) (*E
// getTransformerOverridesAndKMSPluginHealthzCheckers creates the set of transformers and KMS healthz checks based on the given config.
// It may launch multiple go routines whose lifecycle is controlled by ctx.
// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched.
func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]value.Transformer, []healthz.HealthChecker, *kmsState, error) {
func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthz.HealthChecker, *kmsState, error) {
var kmsHealthChecks []healthz.HealthChecker
transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config)
if err != nil {
@ -228,8 +229,8 @@ type healthChecker interface {
// getTransformerOverridesAndKMSPluginProbes creates the set of transformers and KMS probes based on the given config.
// It may launch multiple go routines whose lifecycle is controlled by ctx.
// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched.
func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]value.Transformer, []healthChecker, *kmsState, error) {
resourceToPrefixTransformer := map[schema.GroupResource][]value.PrefixTransformer{}
func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthChecker, *kmsState, error) {
resourceToPrefixTransformer := map[schema.GroupResource][]storagevalue.PrefixTransformer{}
var probes []healthChecker
var kmsUsed kmsState
@ -268,11 +269,11 @@ func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apis
probes = append(probes, p...)
}
transformers := make(map[schema.GroupResource]value.Transformer, len(resourceToPrefixTransformer))
transformers := make(map[schema.GroupResource]storagevalue.Transformer, len(resourceToPrefixTransformer))
for gr, transList := range resourceToPrefixTransformer {
gr := gr
transList := transList
transformers[gr] = value.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...)
transformers[gr] = storagevalue.NewPrefixTransformers(fmt.Errorf("no matching prefix found"), transList...)
}
return transformers, probes, &kmsUsed, nil
@ -332,8 +333,8 @@ func (h *kmsv2PluginProbe) check(ctx context.Context) error {
return nil
}
// rotateDEKOnKeyIDChange tries to rotate to a new DEK if the key ID returned by Status does not match the
// current state. If a successful rotation is performed, the new DEK and keyID overwrite the existing state.
// rotateDEKOnKeyIDChange tries to rotate to a new DEK/seed if the key ID returned by Status does not match the
// current state. If a successful rotation is performed, the new DEK/seed and keyID overwrite the existing state.
// On any failure during rotation (including mismatch between status and encrypt calls), the current state is
// preserved and will remain valid to use for encryption until its expiration (the system attempts to coast).
// If the key ID returned by Status matches the current state, the expiration of the current state is extended
@ -346,47 +347,62 @@ func (h *kmsv2PluginProbe) rotateDEKOnKeyIDChange(ctx context.Context, statusKey
// allow reads indefinitely in all cases
// allow writes indefinitely as long as there is no error
// allow writes for only up to kmsv2PluginWriteDEKMaxTTL from now when there are errors
// we start the timer before we make the network call because kmsv2PluginWriteDEKMaxTTL is meant to be the upper bound
expirationTimestamp := envelopekmsv2.NowFunc().Add(kmsv2PluginWriteDEKMaxTTL)
// allow writes for only up to kmsv2PluginWriteDEKSourceMaxTTL from now when there are errors
// we start the timer before we make the network call because kmsv2PluginWriteDEKSourceMaxTTL is meant to be the upper bound
expirationTimestamp := envelopekmsv2.NowFunc().Add(kmsv2PluginWriteDEKSourceMaxTTL)
// state is valid and status keyID is unchanged from when we generated this DEK so there is no need to rotate it
// dynamically check if we want to use KDF seed to derive DEKs or just a single DEK
// this gate can only change during tests, but the check is cheap enough to always make
// this allows us to easily exercise both modes without restarting the API server
// TODO integration test that this dynamically takes effect
useSeed := utilfeature.DefaultFeatureGate.Enabled(features.KMSv2KDF)
stateUseSeed := state.EncryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED
// state is valid and status keyID is unchanged from when we generated this DEK/seed so there is no need to rotate it
// just move the expiration of the current state forward by the reuse interval
if errState == nil && state.KeyID == statusKeyID {
// useSeed can only change at runtime during tests, so we check it here to allow us to easily exercise both modes
if errState == nil && state.EncryptedObject.KeyID == statusKeyID && stateUseSeed == useSeed {
state.ExpirationTimestamp = expirationTimestamp
h.state.Store(&state)
return nil
}
transformer, resp, cacheKey, errGen := envelopekmsv2.GenerateTransformer(ctx, uid, h.service)
transformer, encObject, cacheKey, errGen := envelopekmsv2.GenerateTransformer(ctx, uid, h.service, useSeed)
if resp == nil {
resp = &kmsservice.EncryptResponse{} // avoid nil panics
if encObject == nil {
encObject = &kmstypes.EncryptedObject{} // avoid nil panics
}
// happy path, should be the common case
// TODO maybe add success metrics?
if errGen == nil && resp.KeyID == statusKeyID {
if errGen == nil && encObject.KeyID == statusKeyID {
h.state.Store(&envelopekmsv2.State{
Transformer: transformer,
EncryptedDEK: resp.Ciphertext,
KeyID: resp.KeyID,
Annotations: resp.Annotations,
EncryptedObject: *encObject,
UID: uid,
ExpirationTimestamp: expirationTimestamp,
CacheKey: cacheKey,
})
klog.V(6).InfoS("successfully rotated DEK",
"uid", uid,
"newKeyID", resp.KeyID,
"oldKeyID", state.KeyID,
"expirationTimestamp", expirationTimestamp.Format(time.RFC3339),
)
return nil
// it should be logically impossible for the new state to be invalid but check just in case
_, errGen = h.getCurrentState()
if errGen == nil {
klogV6 := klog.V(6)
if klogV6.Enabled() {
klogV6.InfoS("successfully rotated DEK",
"uid", uid,
"useSeed", useSeed,
"newKeyIDHash", envelopekmsv2.GetHashIfNotEmpty(encObject.KeyID),
"oldKeyIDHash", envelopekmsv2.GetHashIfNotEmpty(state.EncryptedObject.KeyID),
"expirationTimestamp", expirationTimestamp.Format(time.RFC3339),
)
}
return nil
}
}
return fmt.Errorf("failed to rotate DEK uid=%q, errState=%v, errGen=%v, statusKeyID=%q, encryptKeyID=%q, stateKeyID=%q, expirationTimestamp=%s",
uid, errState, errGen, statusKeyID, resp.KeyID, state.KeyID, state.ExpirationTimestamp.Format(time.RFC3339))
return fmt.Errorf("failed to rotate DEK uid=%q, useSeed=%v, errState=%v, errGen=%v, statusKeyIDHash=%q, encryptKeyIDHash=%q, stateKeyIDHash=%q, expirationTimestamp=%s",
uid, useSeed, errState, errGen, envelopekmsv2.GetHashIfNotEmpty(statusKeyID), envelopekmsv2.GetHashIfNotEmpty(encObject.KeyID), envelopekmsv2.GetHashIfNotEmpty(state.EncryptedObject.KeyID), state.ExpirationTimestamp.Format(time.RFC3339))
}
// getCurrentState returns the latest state from the last status and encrypt calls.
@ -399,12 +415,13 @@ func (h *kmsv2PluginProbe) getCurrentState() (envelopekmsv2.State, error) {
return envelopekmsv2.State{}, fmt.Errorf("got unexpected nil transformer")
}
if len(state.EncryptedDEK) == 0 {
return envelopekmsv2.State{}, fmt.Errorf("got unexpected empty EncryptedDEK")
encryptedObjectCopy := state.EncryptedObject
if len(encryptedObjectCopy.EncryptedData) != 0 {
return envelopekmsv2.State{}, fmt.Errorf("got unexpected non-empty EncryptedData")
}
if len(state.KeyID) == 0 {
return envelopekmsv2.State{}, fmt.Errorf("got unexpected empty keyID")
encryptedObjectCopy.EncryptedData = []byte{0} // any non-empty value to pass validation
if err := envelopekmsv2.ValidateEncryptedObject(&encryptedObjectCopy); err != nil {
return envelopekmsv2.State{}, fmt.Errorf("got invalid EncryptedObject: %w", err)
}
if state.ExpirationTimestamp.IsZero() {
@ -429,7 +446,7 @@ func (h *kmsv2PluginProbe) isKMSv2ProviderHealthyAndMaybeRotateDEK(ctx context.C
if errCode, err := envelopekmsv2.ValidateKeyID(response.KeyID); err != nil {
metrics.RecordInvalidKeyIDFromStatus(h.name, string(errCode))
errs = append(errs, fmt.Errorf("got invalid KMSv2 KeyID %q: %w", response.KeyID, err))
errs = append(errs, fmt.Errorf("got invalid KMSv2 KeyID hash %q: %w", envelopekmsv2.GetHashIfNotEmpty(response.KeyID), err))
} else {
metrics.RecordKeyIDFromStatus(h.name, response.KeyID)
// unconditionally append as we filter out nil errors below
@ -478,15 +495,15 @@ func loadConfig(filepath string, reload bool) (*apiserverconfig.EncryptionConfig
// prefixTransformersAndProbes creates the set of transformers and KMS probes based on the given resource config.
// It may launch multiple go routines whose lifecycle is controlled by ctx.
// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched.
func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]value.PrefixTransformer, []healthChecker, *kmsState, error) {
var transformers []value.PrefixTransformer
func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]storagevalue.PrefixTransformer, []healthChecker, *kmsState, error) {
var transformers []storagevalue.PrefixTransformer
var probes []healthChecker
var kmsUsed kmsState
for _, provider := range config.Providers {
provider := provider
var (
transformer value.PrefixTransformer
transformer storagevalue.PrefixTransformer
transformerErr error
probe healthChecker
used *kmsState
@ -497,7 +514,7 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res
transformer, transformerErr = aesPrefixTransformer(provider.AESGCM, aestransformer.NewGCMTransformer, aesGCMTransformerPrefixV1)
case provider.AESCBC != nil:
cbcTransformer := func(block cipher.Block) (value.Transformer, error) {
cbcTransformer := func(block cipher.Block) (storagevalue.Transformer, error) {
return aestransformer.NewCBCTransformer(block), nil
}
transformer, transformerErr = aesPrefixTransformer(provider.AESCBC, cbcTransformer, aesCBCTransformerPrefixV1)
@ -513,7 +530,7 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res
}
case provider.Identity != nil:
transformer = value.PrefixTransformer{
transformer = storagevalue.PrefixTransformer{
Transformer: identity.NewEncryptCheckTransformer(),
Prefix: []byte{},
}
@ -532,10 +549,10 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res
return transformers, probes, &kmsUsed, nil
}
type blockTransformerFunc func(cipher.Block) (value.Transformer, error)
type blockTransformerFunc func(cipher.Block) (storagevalue.Transformer, error)
func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (value.PrefixTransformer, error) {
var result value.PrefixTransformer
func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTransformerFunc, prefix string) (storagevalue.PrefixTransformer, error) {
var result storagevalue.PrefixTransformer
if len(config.Keys) == 0 {
return result, fmt.Errorf("aes provider has no valid keys")
@ -550,7 +567,7 @@ func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTran
}
}
keyTransformers := []value.PrefixTransformer{}
keyTransformers := []storagevalue.PrefixTransformer{}
for _, keyData := range config.Keys {
keyData := keyData
@ -569,26 +586,26 @@ func aesPrefixTransformer(config *apiserverconfig.AESConfiguration, fn blockTran
// Create a new PrefixTransformer for this key
keyTransformers = append(keyTransformers,
value.PrefixTransformer{
storagevalue.PrefixTransformer{
Transformer: transformer,
Prefix: []byte(keyData.Name + ":"),
})
}
// Create a prefixTransformer which can choose between these keys
keyTransformer := value.NewPrefixTransformers(
keyTransformer := storagevalue.NewPrefixTransformers(
fmt.Errorf("no matching key was found for the provided AES transformer"), keyTransformers...)
// Create a PrefixTransformer which shall later be put in a list with other providers
result = value.PrefixTransformer{
result = storagevalue.PrefixTransformer{
Transformer: keyTransformer,
Prefix: []byte(prefix),
}
return result, nil
}
func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (value.PrefixTransformer, error) {
var result value.PrefixTransformer
func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration) (storagevalue.PrefixTransformer, error) {
var result storagevalue.PrefixTransformer
if len(config.Keys) == 0 {
return result, fmt.Errorf("secretbox provider has no valid keys")
@ -603,7 +620,7 @@ func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration)
}
}
keyTransformers := []value.PrefixTransformer{}
keyTransformers := []storagevalue.PrefixTransformer{}
for _, keyData := range config.Keys {
keyData := keyData
@ -621,18 +638,18 @@ func secretboxPrefixTransformer(config *apiserverconfig.SecretboxConfiguration)
// Create a new PrefixTransformer for this key
keyTransformers = append(keyTransformers,
value.PrefixTransformer{
storagevalue.PrefixTransformer{
Transformer: secretbox.NewSecretboxTransformer(keyArray),
Prefix: []byte(keyData.Name + ":"),
})
}
// Create a prefixTransformer which can choose between these keys
keyTransformer := value.NewPrefixTransformers(
keyTransformer := storagevalue.NewPrefixTransformers(
fmt.Errorf("no matching key was found for the provided Secretbox transformer"), keyTransformers...)
// Create a PrefixTransformer which shall later be put in a list with other providers
result = value.PrefixTransformer{
result = storagevalue.PrefixTransformer{
Transformer: keyTransformer,
Prefix: []byte(secretboxTransformerPrefixV1),
}
@ -665,13 +682,18 @@ func (s *kmsState) accumulate(other *kmsState) {
// kmsPrefixTransformer creates a KMS transformer and probe based on the given KMS config.
// It may launch multiple go routines whose lifecycle is controlled by ctx.
// In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched.
func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (value.PrefixTransformer, healthChecker, *kmsState, error) {
func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (storagevalue.PrefixTransformer, healthChecker, *kmsState, error) {
kmsName := config.Name
switch config.APIVersion {
case kmsAPIVersionV1:
if !utilfeature.DefaultFeatureGate.Enabled(features.KMSv1) {
return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("KMSv1 is deprecated and will only receive security updates going forward. Use KMSv2 instead. Set --feature-gates=KMSv1=true to use the deprecated KMSv1 feature.")
}
klog.InfoS("KMSv1 is deprecated and will only receive security updates going forward. Use KMSv2 instead.")
envelopeService, err := envelopeServiceFactory(ctx, config.Endpoint, config.Timeout.Duration)
if err != nil {
return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv1-Plugin's probe %q, error: %w", kmsName, err)
return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv1-Plugin's probe %q, error: %w", kmsName, err)
}
probe := &kmsPluginProbe{
@ -692,12 +714,12 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig
case kmsAPIVersionV2:
if !utilfeature.DefaultFeatureGate.Enabled(features.KMSv2) {
return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName)
return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2 plugin %q, KMSv2 feature is not enabled", kmsName)
}
envelopeService, err := EnvelopeKMSv2ServiceFactory(ctx, config.Endpoint, config.Name, config.Timeout.Duration)
if err != nil {
return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %w", kmsName, err)
return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMSv2-Plugin's probe %q, error: %w", kmsName, err)
}
probe := &kmsv2PluginProbe{
@ -710,45 +732,9 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig
// initialize state so that Load always works
probe.state.Store(&envelopekmsv2.State{})
runProbeCheckAndLog := func(ctx context.Context) error {
if err := probe.check(ctx); err != nil {
klog.VDepth(1, 2).ErrorS(err, "kms plugin failed health check probe", "name", kmsName)
return err
}
return nil
}
primeAndProbeKMSv2(ctx, probe, kmsName)
// on the happy path where the plugin is healthy and available on server start,
// prime keyID and DEK by running the check inline once (this also prevents unit tests from flaking)
// ignore the error here since we want to support the plugin starting up async with the API server
_ = runProbeCheckAndLog(ctx)
// make sure that the plugin's key ID is reasonably up-to-date
// also, make sure that our DEK is up-to-date with said key ID (if it expires the server will fail all writes)
// if this background loop ever stops running, the server will become non-functional after kmsv2PluginWriteDEKMaxTTL
go wait.PollUntilWithContext(
ctx,
kmsv2PluginHealthzPositiveInterval,
func(ctx context.Context) (bool, error) {
if err := runProbeCheckAndLog(ctx); err == nil {
return false, nil
}
// TODO add integration test for quicker error poll on failure
// if we fail, block the outer polling and start a new quicker poll inline
// this limits the chance that our DEK expires during a transient failure
_ = wait.PollUntilWithContext(
ctx,
kmsv2PluginHealthzNegativeInterval,
func(ctx context.Context) (bool, error) {
return runProbeCheckAndLog(ctx) == nil, nil
},
)
return false, nil
})
// using AES-GCM by default for encrypting data with KMSv2
transformer := value.PrefixTransformer{
transformer := storagevalue.PrefixTransformer{
Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, kmsName, probe.getCurrentState),
Prefix: []byte(kmsTransformerPrefixV2 + kmsName + ":"),
}
@ -759,12 +745,62 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig
}, nil
default:
return value.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMS plugin %q, unsupported KMS API version %q", kmsName, config.APIVersion)
return storagevalue.PrefixTransformer{}, nil, nil, fmt.Errorf("could not configure KMS plugin %q, unsupported KMS API version %q", kmsName, config.APIVersion)
}
}
func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) value.PrefixTransformer {
baseTransformerFunc := func(block cipher.Block) (value.Transformer, error) {
func primeAndProbeKMSv2(ctx context.Context, probe *kmsv2PluginProbe, kmsName string) {
runProbeCheckAndLog := func(ctx context.Context, depth int) error {
if err := probe.check(ctx); err != nil {
klog.VDepth(1+depth, 2).ErrorS(err, "kms plugin failed health check probe", "name", kmsName)
return err
}
return nil
}
blockAndProbeFastUntilSuccess := func(ctx context.Context) {
_ = wait.PollUntilWithContext(
ctx,
kmsv2PluginHealthzNegativeInterval,
func(ctx context.Context) (bool, error) {
return runProbeCheckAndLog(ctx, 1) == nil, nil
},
)
}
// on the happy path where the plugin is healthy and available on server start,
// prime keyID and DEK by running the check inline once (this also prevents unit tests from flaking)
errPrime := runProbeCheckAndLog(ctx, 0)
// if our initial attempt to prime failed, start trying to get to a valid state in the background ASAP
// this prevents a slow start when the external healthz checker is configured to ignore the KMS healthz endpoint
// since we want to support the plugin starting up async with the API server, this error is not fatal
if errPrime != nil {
go blockAndProbeFastUntilSuccess(ctx) // separate go routine to avoid blocking
}
// make sure that the plugin's key ID is reasonably up-to-date
// also, make sure that our DEK is up-to-date with said key ID (if it expires the server will fail all writes)
// if this background loop ever stops running, the server will become non-functional after kmsv2PluginWriteDEKSourceMaxTTL
go wait.PollUntilWithContext(
ctx,
kmsv2PluginHealthzPositiveInterval,
func(ctx context.Context) (bool, error) {
if err := runProbeCheckAndLog(ctx, 0); err == nil {
return false, nil
}
// TODO add integration test for quicker error poll on failure
// if we fail, block the outer polling and start a new quicker poll inline
// this limits the chance that our DEK expires during a transient failure
blockAndProbeFastUntilSuccess(ctx)
return false, nil
})
}
func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelopeService envelope.Service, prefix string) storagevalue.PrefixTransformer {
baseTransformerFunc := func(block cipher.Block) (storagevalue.Transformer, error) {
gcm, err := aestransformer.NewGCMTransformer(block)
if err != nil {
return nil, err
@ -777,15 +813,15 @@ func envelopePrefixTransformer(config *apiserverconfig.KMSConfiguration, envelop
return unionTransformers{gcm, aestransformer.NewCBCTransformer(block)}, nil
}
return value.PrefixTransformer{
return storagevalue.PrefixTransformer{
Transformer: envelope.NewEnvelopeTransformer(envelopeService, int(*config.CacheSize), baseTransformerFunc),
Prefix: []byte(prefix + config.Name + ":"),
}
}
type unionTransformers []value.Transformer
type unionTransformers []storagevalue.Transformer
func (u unionTransformers) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) (out []byte, stale bool, err error) {
func (u unionTransformers) TransformFromStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) (out []byte, stale bool, err error) {
var errs []error
for i := range u {
transformer := u[i]
@ -804,7 +840,7 @@ func (u unionTransformers) TransformFromStorage(ctx context.Context, data []byte
return nil, false, fmt.Errorf("unionTransformers: unable to transform from storage")
}
func (u unionTransformers) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) (out []byte, err error) {
func (u unionTransformers) TransformToStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) (out []byte, err error) {
return u[0].TransformToStorage(ctx, data, dataCtx)
}
@ -815,7 +851,7 @@ func computeEncryptionConfigHash(data []byte) string {
return fmt.Sprintf("%x", sha256.Sum256(data))
}
var _ ResourceTransformers = &DynamicTransformers{}
var _ storagevalue.ResourceTransformers = &DynamicTransformers{}
var _ healthz.HealthChecker = &DynamicTransformers{}
// DynamicTransformers holds transformers that may be dynamically updated via a single external actor, likely a controller.
@ -825,7 +861,7 @@ type DynamicTransformers struct {
}
type transformTracker struct {
transformerOverrides map[schema.GroupResource]value.Transformer
transformerOverrides map[schema.GroupResource]storagevalue.Transformer
kmsPluginHealthzCheck healthz.HealthChecker
closeTransformers context.CancelFunc
kmsCloseGracePeriod time.Duration
@ -833,7 +869,7 @@ type transformTracker struct {
// NewDynamicTransformers returns transformers, health checks for KMS providers, and the ability to close the transformers.
func NewDynamicTransformers(
transformerOverrides map[schema.GroupResource]value.Transformer,
transformerOverrides map[schema.GroupResource]storagevalue.Transformer,
kmsPluginHealthzCheck healthz.HealthChecker,
closeTransformers context.CancelFunc,
kmsCloseGracePeriod time.Duration,
@ -864,7 +900,7 @@ func (d *DynamicTransformers) Name() string {
}
// TransformerForResource returns the transformer for the given resource.
func (d *DynamicTransformers) TransformerForResource(resource schema.GroupResource) value.Transformer {
func (d *DynamicTransformers) TransformerForResource(resource schema.GroupResource) storagevalue.Transformer {
return &resourceTransformer{
resource: resource,
transformTracker: d.transformTracker,
@ -873,7 +909,7 @@ func (d *DynamicTransformers) TransformerForResource(resource schema.GroupResource) value.Transformer {
// Set sets the transformer overrides. This method is not goroutine safe and must only be called by the same, single caller throughout the lifetime of this object.
func (d *DynamicTransformers) Set(
transformerOverrides map[schema.GroupResource]value.Transformer,
transformerOverrides map[schema.GroupResource]storagevalue.Transformer,
closeTransformers context.CancelFunc,
kmsPluginHealthzCheck healthz.HealthChecker,
kmsCloseGracePeriod time.Duration,
@ -898,34 +934,30 @@ func (d *DynamicTransformers) Set(
}()
}
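Set stores a fresh transformTracker into an atomic.Value, which is what lets TransformerForResource readers proceed lock-free while a single writer hot-swaps the state. A toy sketch of that single-writer/many-readers idiom, using a hypothetical tracker type:

package main

import (
	"fmt"
	"sync/atomic"
)

type tracker struct{ generation int }

type dynamic struct{ current atomic.Value }

// set is called by a single writer; readers never take a lock.
func (d *dynamic) set(t *tracker) { d.current.Store(t) }

// get is safe to call from any goroutine.
func (d *dynamic) get() *tracker { return d.current.Load().(*tracker) }

func main() {
	d := &dynamic{}
	d.set(&tracker{generation: 1})
	fmt.Println(d.get().generation) // 1
	d.set(&tracker{generation: 2})  // hot swap; in-flight readers keep their old snapshot
	fmt.Println(d.get().generation) // 2
}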
var _ value.Transformer = &resourceTransformer{}
var _ storagevalue.Transformer = &resourceTransformer{}
type resourceTransformer struct {
resource schema.GroupResource
transformTracker *atomic.Value
}
func (r *resourceTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) {
func (r *resourceTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) ([]byte, bool, error) {
return r.transformer().TransformFromStorage(ctx, data, dataCtx)
}
func (r *resourceTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) {
func (r *resourceTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx storagevalue.Context) ([]byte, error) {
return r.transformer().TransformToStorage(ctx, data, dataCtx)
}
func (r *resourceTransformer) transformer() value.Transformer {
func (r *resourceTransformer) transformer() storagevalue.Transformer {
return transformerFromOverrides(r.transformTracker.Load().(*transformTracker).transformerOverrides, r.resource)
}
type ResourceTransformers interface {
TransformerForResource(resource schema.GroupResource) value.Transformer
}
var _ storagevalue.ResourceTransformers = &StaticTransformers{}
var _ ResourceTransformers = &StaticTransformers{}
type StaticTransformers map[schema.GroupResource]storagevalue.Transformer
type StaticTransformers map[schema.GroupResource]value.Transformer
func (s StaticTransformers) TransformerForResource(resource schema.GroupResource) value.Transformer {
func (s StaticTransformers) TransformerForResource(resource schema.GroupResource) storagevalue.Transformer {
return transformerFromOverrides(s, resource)
}
@ -934,7 +966,7 @@ var anyGroupAnyResource = schema.GroupResource{
Resource: "*",
}
func transformerFromOverrides(transformerOverrides map[schema.GroupResource]value.Transformer, resource schema.GroupResource) value.Transformer {
func transformerFromOverrides(transformerOverrides map[schema.GroupResource]storagevalue.Transformer, resource schema.GroupResource) storagevalue.Transformer {
if transformer := transformerOverrides[resource]; transformer != nil {
return transformer
}
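transformerFromOverrides first tries an exact GroupResource match and then, per the anyGroupAnyResource sentinel above, presumably falls back to the "*.*" wildcard entry. A toy lookup illustrating that precedence, with plain strings standing in for transformers:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

var wildcard = schema.GroupResource{Group: "*", Resource: "*"}

// lookup returns the exact override when present, else the wildcard entry (or "" for none).
func lookup(overrides map[schema.GroupResource]string, resource schema.GroupResource) string {
	if t, ok := overrides[resource]; ok {
		return t // exact match wins
	}
	return overrides[wildcard] // "*.*" catches everything else
}

func main() {
	overrides := map[schema.GroupResource]string{
		{Group: "", Resource: "secrets"}: "aesgcm",
		wildcard:                         "kms",
	}
	fmt.Println(lookup(overrides, schema.GroupResource{Group: "", Resource: "secrets"}))          // aesgcm
	fmt.Println(lookup(overrides, schema.GroupResource{Group: "apps", Resource: "deployments"})) // kms
}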

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/options/encryptionconfig"
"k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
)
@ -163,16 +164,19 @@ func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx context.Context) bool {
ctx, closeTransformers := context.WithCancel(serverCtx)
defer func() {
// TODO: increment success metric when updatedEffectiveConfig=true
// TODO can work queue metrics help here?
if !updatedEffectiveConfig {
// avoid leaking if we're not using the newly constructed transformers (because of an error or because the config did not change)
closeTransformers()
}
if updatedEffectiveConfig && err == nil {
metrics.RecordEncryptionConfigAutomaticReloadSuccess()
}
if err != nil {
// TODO: increment failure metric
metrics.RecordEncryptionConfigAutomaticReloadFailure()
utilruntime.HandleError(fmt.Errorf("error processing encryption config file %s: %v", d.filePath, err))
// add dummy item back to the queue to trigger file content processing.
d.queue.AddRateLimited(key)
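The deferred block above settles each work item exactly once: the success counter is bumped only when a new config actually took effect, while a failure bumps the failure counter and requeues the key with rate limiting. A compact sketch of that settle-in-defer shape, with stub metrics and a stub queue (all names hypothetical):

package main

import "fmt"

type rateLimitedQueue struct{}

func (q *rateLimitedQueue) AddRateLimited(key string) { fmt.Println("requeued:", key) }

func recordSuccess() { fmt.Println("reload success") }
func recordFailure() { fmt.Println("reload failure") }

func processKey(q *rateLimitedQueue, key string, reload func() (applied bool, err error)) {
	var applied bool
	var err error
	defer func() {
		if applied && err == nil {
			recordSuccess()
		}
		if err != nil {
			recordFailure()
			q.AddRateLimited(key) // retry later with backoff
		}
	}()
	applied, err = reload()
}

func main() {
	q := &rateLimitedQueue{}
	processKey(q, "encryption-config", func() (bool, error) { return true, nil })
	processKey(q, "encryption-config", func() (bool, error) { return false, fmt.Errorf("parse error") })
}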

View File

@ -0,0 +1,86 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
const (
namespace = "apiserver"
subsystem = "encryption_config_controller"
)
var (
encryptionConfigAutomaticReloadFailureTotal = metrics.NewCounter(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "automatic_reload_failures_total",
Help: "Total number of failed automatic reloads of encryption configuration.",
StabilityLevel: metrics.ALPHA,
},
)
encryptionConfigAutomaticReloadSuccessTotal = metrics.NewCounter(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "automatic_reload_success_total",
Help: "Total number of successful automatic reloads of encryption configuration.",
StabilityLevel: metrics.ALPHA,
},
)
encryptionConfigAutomaticReloadLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "automatic_reload_last_timestamp_seconds",
Help: "Timestamp of the last successful or failed automatic reload of encryption configuration.",
StabilityLevel: metrics.ALPHA,
},
[]string{"status"},
)
)
var registerMetrics sync.Once
func RegisterMetrics() {
registerMetrics.Do(func() {
legacyregistry.MustRegister(encryptionConfigAutomaticReloadFailureTotal)
legacyregistry.MustRegister(encryptionConfigAutomaticReloadSuccessTotal)
legacyregistry.MustRegister(encryptionConfigAutomaticReloadLastTimestampSeconds)
})
}
func RecordEncryptionConfigAutomaticReloadFailure() {
encryptionConfigAutomaticReloadFailureTotal.Inc()
recordEncryptionConfigAutomaticReloadTimestamp("failure")
}
func RecordEncryptionConfigAutomaticReloadSuccess() {
encryptionConfigAutomaticReloadSuccessTotal.Inc()
recordEncryptionConfigAutomaticReloadTimestamp("success")
}
func recordEncryptionConfigAutomaticReloadTimestamp(result string) {
encryptionConfigAutomaticReloadLastTimestampSeconds.WithLabelValues(result).SetToCurrentTime()
}
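A caller of this package registers once and then records one outcome per reload attempt. The sketch below shows that wiring; onReload is a hypothetical hook, while RegisterMetrics and the two Record functions are the ones defined above:

package main

import (
	"errors"

	"k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics"
)

func init() {
	// guarded by sync.Once internally, so repeated calls are safe
	metrics.RegisterMetrics()
}

// onReload records exactly one outcome per reload attempt.
func onReload(err error) {
	if err != nil {
		metrics.RecordEncryptionConfigAutomaticReloadFailure()
		return
	}
	metrics.RecordEncryptionConfigAutomaticReloadSuccess()
}

func main() {
	onReload(nil)                      // bumps ..._success_total and the "success" timestamp
	onReload(errors.New("bad config")) // bumps ..._failures_total and the "failure" timestamp
}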

View File

@ -36,9 +36,10 @@ import (
"k8s.io/apiserver/pkg/server/options/encryptionconfig"
encryptionconfigcontroller "k8s.io/apiserver/pkg/server/options/encryptionconfig/controller"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/storagebackend"
storagefactory "k8s.io/apiserver/pkg/storage/storagebackend/factory"
flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
storagevalue "k8s.io/apiserver/pkg/storage/value"
"k8s.io/klog/v2"
)
@ -64,11 +65,6 @@ type EtcdOptions struct {
// WatchCacheSizes represents override to a given resource
WatchCacheSizes []string
// complete guards fields that must be initialized via Complete before the Apply methods can be used.
complete bool
resourceTransformers encryptionconfig.ResourceTransformers
kmsPluginHealthzChecks []healthz.HealthChecker
// SkipHealthEndpoints, when true, causes the Apply methods to not set up health endpoints.
// This allows multiple invocations of the Apply methods without duplication of said endpoints.
SkipHealthEndpoints bool
@ -212,92 +208,18 @@ func (s *EtcdOptions) AddFlags(fs *pflag.FlagSet) {
"The time in seconds that each lease is reused. A lower value could avoid large number of objects reusing the same lease. Notice that a too small value may cause performance problems at storage layer.")
}
// Complete must be called exactly once before using any of the Apply methods. It is responsible for setting
// up objects that must be created once and reused across multiple invocations such as storage transformers.
// This method mutates the receiver (EtcdOptions). It must never mutate the inputs.
func (s *EtcdOptions) Complete(
storageObjectCountTracker flowcontrolrequest.StorageObjectCountTracker,
stopCh <-chan struct{},
addPostStartHook func(name string, hook server.PostStartHookFunc) error,
) error {
if s == nil {
return nil
}
if s.complete {
return fmt.Errorf("EtcdOptions.Complete called more than once")
}
if len(s.EncryptionProviderConfigFilepath) != 0 {
ctxServer := wait.ContextForChannel(stopCh)
// nolint:govet // The only code path where closeTransformers does not get called is when it gets stored in dynamicTransformers.
ctxTransformers, closeTransformers := context.WithCancel(ctxServer)
encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload)
if err != nil {
// in case of error, we want to close partially initialized (if any) transformers
closeTransformers()
return err
}
// enable kms hot reload controller only if the config file is set to be automatically reloaded
if s.EncryptionProviderConfigAutomaticReload {
// with reload=true we will always have 1 health check
if len(encryptionConfiguration.HealthChecks) != 1 {
// in case of error, we want to close partially initialized (if any) transformers
closeTransformers()
return fmt.Errorf("failed to start kms encryption config hot reload controller. only 1 health check should be available when reload is enabled")
}
// Here the dynamic transformers take ownership of the transformers and their cancellation.
dynamicTransformers := encryptionconfig.NewDynamicTransformers(encryptionConfiguration.Transformers, encryptionConfiguration.HealthChecks[0], closeTransformers, encryptionConfiguration.KMSCloseGracePeriod)
// add post start hook to start hot reload controller
// adding this hook here will ensure that it gets configured exactly once
err = addPostStartHook(
"start-encryption-provider-config-automatic-reload",
func(_ server.PostStartHookContext) error {
dynamicEncryptionConfigController := encryptionconfigcontroller.NewDynamicEncryptionConfiguration(
"encryption-provider-config-automatic-reload-controller",
s.EncryptionProviderConfigFilepath,
dynamicTransformers,
encryptionConfiguration.EncryptionFileContentHash,
)
go dynamicEncryptionConfigController.Run(ctxServer)
return nil
},
)
if err != nil {
// in case of error, we want to close partially initialized (if any) transformers
closeTransformers()
return fmt.Errorf("failed to add post start hook for kms encryption config hot reload controller: %w", err)
}
s.resourceTransformers = dynamicTransformers
s.kmsPluginHealthzChecks = []healthz.HealthChecker{dynamicTransformers}
} else {
s.resourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers)
s.kmsPluginHealthzChecks = encryptionConfiguration.HealthChecks
}
}
s.StorageConfig.StorageObjectCountTracker = storageObjectCountTracker
s.complete = true
// nolint:govet // The only code path where closeTransformers does not get called is when it gets stored in dynamicTransformers.
return nil
}
// ApplyTo mutates the provided server.Config. It must never mutate the receiver (EtcdOptions).
func (s *EtcdOptions) ApplyTo(c *server.Config) error {
if s == nil {
return nil
}
return s.ApplyWithStorageFactoryTo(&SimpleStorageFactory{StorageConfig: s.StorageConfig}, c)
storageConfigCopy := s.StorageConfig
if storageConfigCopy.StorageObjectCountTracker == nil {
storageConfigCopy.StorageObjectCountTracker = c.StorageObjectCountTracker
}
return s.ApplyWithStorageFactoryTo(&SimpleStorageFactory{StorageConfig: storageConfigCopy}, c)
}
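ApplyTo honors its "never mutate the receiver" contract by assigning s.StorageConfig to a local: struct assignment in Go copies the value, so setting the tracker on the copy leaves the options untouched (the copy is shallow, so only fields set directly on it diverge). A tiny demonstration with a hypothetical Config type:

package main

import "fmt"

type Config struct {
	Tracker string
}

type Options struct {
	StorageConfig Config
}

// apply fills in a default on a copy, never on the receiver's field.
func (o *Options) apply(defaultTracker string) Config {
	cfg := o.StorageConfig // struct assignment copies the value
	if cfg.Tracker == "" {
		cfg.Tracker = defaultTracker
	}
	return cfg
}

func main() {
	o := &Options{}
	_ = o.apply("server-tracker")
	fmt.Printf("receiver unchanged: %q\n", o.StorageConfig.Tracker) // ""
}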
// ApplyWithStorageFactoryTo mutates the provided server.Config. It must never mutate the receiver (EtcdOptions).
@ -306,24 +228,118 @@ func (s *EtcdOptions) ApplyWithStorageFactoryTo(factory serverstorage.StorageFactory, c *server.Config) error {
return nil
}
if !s.complete {
return fmt.Errorf("EtcdOptions.Apply called without completion")
}
if !s.SkipHealthEndpoints {
if err := s.addEtcdHealthEndpoint(c); err != nil {
return err
}
}
if s.resourceTransformers != nil {
// set up encryption
if err := s.maybeApplyResourceTransformers(c); err != nil {
return err
}
metrics.SetStorageMonitorGetter(monitorGetter(factory))
c.RESTOptionsGetter = s.CreateRESTOptionsGetter(factory, c.ResourceTransformers)
return nil
}
func monitorGetter(factory serverstorage.StorageFactory) func() (monitors []metrics.Monitor, err error) {
return func() (monitors []metrics.Monitor, err error) {
defer func() {
if err != nil {
for _, m := range monitors {
m.Close()
}
}
}()
var m metrics.Monitor
for _, cfg := range factory.Configs() {
m, err = storagefactory.CreateMonitor(cfg)
if err != nil {
return nil, err
}
monitors = append(monitors, m)
}
return monitors, nil
}
}
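monitorGetter uses named return values so a deferred block can release every monitor created before a mid-loop failure. Below is one correct shape of that idiom with a hypothetical resource type; note the error path returns the populated slice so the deferred cleanup can still see it, and the cleanup then nils the result out for the caller:

package main

import (
	"errors"
	"fmt"
)

type monitor struct{ id int }

func (m *monitor) Close() { fmt.Println("closed monitor", m.id) }

func createMonitor(id int) (*monitor, error) {
	if id == 2 {
		return nil, errors.New("create failed")
	}
	return &monitor{id: id}, nil
}

// openAll keeps the named result populated on the error path so the deferred
// cleanup can close everything created before the failure.
func openAll(ids []int) (monitors []*monitor, err error) {
	defer func() {
		if err != nil {
			for _, m := range monitors {
				m.Close()
			}
			monitors = nil // callers see nil results together with the error
		}
	}()
	for _, id := range ids {
		var m *monitor
		if m, err = createMonitor(id); err != nil {
			return monitors, err
		}
		monitors = append(monitors, m)
	}
	return monitors, nil
}

func main() {
	ms, err := openAll([]int{0, 1, 2})
	fmt.Println(ms, err) // [] create failed, printed after monitors 0 and 1 close
}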
func (s *EtcdOptions) CreateRESTOptionsGetter(factory serverstorage.StorageFactory, resourceTransformers storagevalue.ResourceTransformers) generic.RESTOptionsGetter {
if resourceTransformers != nil {
factory = &transformerStorageFactory{
delegate: factory,
resourceTransformers: s.resourceTransformers,
resourceTransformers: resourceTransformers,
}
}
return &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory}
}
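CreateRESTOptionsGetter wraps the storage factory in a transformer-aware decorator only when transformers exist, and hands back the plain factory otherwise. The decorator shape, reduced to a toy interface (all types hypothetical):

package main

import "fmt"

type StorageFactory interface {
	NewConfig(resource string) string
}

type plainFactory struct{}

func (plainFactory) NewConfig(resource string) string { return "etcd config for " + resource }

// transformerFactory decorates a delegate, attaching a per-resource transformer to each config.
type transformerFactory struct {
	delegate     StorageFactory
	transformers map[string]string
}

func (t *transformerFactory) NewConfig(resource string) string {
	cfg := t.delegate.NewConfig(resource)
	if tr, ok := t.transformers[resource]; ok {
		cfg += " + transformer " + tr
	}
	return cfg
}

func wrap(f StorageFactory, transformers map[string]string) StorageFactory {
	if transformers == nil {
		return f // no encryption configured: hand back the delegate unchanged
	}
	return &transformerFactory{delegate: f, transformers: transformers}
}

func main() {
	f := wrap(plainFactory{}, map[string]string{"secrets": "kms"})
	fmt.Println(f.NewConfig("secrets"))
	fmt.Println(f.NewConfig("pods"))
}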
func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err error) {
if c.ResourceTransformers != nil {
return nil
}
if len(s.EncryptionProviderConfigFilepath) == 0 {
return nil
}
ctxServer := wait.ContextForChannel(c.DrainedNotify())
ctxTransformers, closeTransformers := context.WithCancel(ctxServer)
defer func() {
// in case of error, we want to close partially initialized (if any) transformers
if err != nil {
closeTransformers()
}
}()
encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload)
if err != nil {
return err
}
if s.EncryptionProviderConfigAutomaticReload {
// with reload=true we will always have 1 health check
if len(encryptionConfiguration.HealthChecks) != 1 {
return fmt.Errorf("failed to start kms encryption config hot reload controller. only 1 health check should be available when reload is enabled")
}
// Here the dynamic transformers take ownership of the transformers and their cancellation.
dynamicTransformers := encryptionconfig.NewDynamicTransformers(encryptionConfiguration.Transformers, encryptionConfiguration.HealthChecks[0], closeTransformers, encryptionConfiguration.KMSCloseGracePeriod)
// add post start hook to start hot reload controller
// adding this hook here will ensure that it gets configured exactly once
err = c.AddPostStartHook(
"start-encryption-provider-config-automatic-reload",
func(_ server.PostStartHookContext) error {
dynamicEncryptionConfigController := encryptionconfigcontroller.NewDynamicEncryptionConfiguration(
"encryption-provider-config-automatic-reload-controller",
s.EncryptionProviderConfigFilepath,
dynamicTransformers,
encryptionConfiguration.EncryptionFileContentHash,
)
go dynamicEncryptionConfigController.Run(ctxServer)
return nil
},
)
if err != nil {
return fmt.Errorf("failed to add post start hook for kms encryption config hot reload controller: %w", err)
}
c.ResourceTransformers = dynamicTransformers
if !s.SkipHealthEndpoints {
c.AddHealthChecks(dynamicTransformers)
}
} else {
c.ResourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers)
if !s.SkipHealthEndpoints {
c.AddHealthChecks(encryptionConfiguration.HealthChecks...)
}
}
c.RESTOptionsGetter = &StorageFactoryRestOptionsFactory{Options: *s, StorageFactory: factory}
return nil
}
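The hot-reload controller is started from a post-start hook so it only runs once the server is up, and the hook returns immediately after spawning the goroutine. A stripped-down sketch of that launch pattern, assuming a simplified hook signature (the real server.PostStartHookContext carries more than a plain context):

package main

import (
	"context"
	"fmt"
	"time"
)

// addPostStartHook stands in for server.Config.AddPostStartHook.
func addPostStartHook(name string, hook func(ctx context.Context) error, serverCtx context.Context) error {
	fmt.Println("registered hook:", name)
	return hook(serverCtx) // the real server invokes hooks once it starts serving
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	runController := func(ctx context.Context) {
		<-ctx.Done() // a real controller would watch the config file until shutdown
		fmt.Println("controller stopped")
	}

	_ = addPostStartHook("start-encryption-provider-config-automatic-reload", func(ctx context.Context) error {
		go runController(ctx) // return immediately; the controller owns its lifetime
		return nil
	}, ctx)

	time.Sleep(200 * time.Millisecond) // let the toy controller observe cancellation
}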
@ -344,8 +360,6 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error {
return readyCheck()
}))
c.AddHealthChecks(s.kmsPluginHealthzChecks...)
return nil
}
@ -457,7 +471,7 @@ var _ serverstorage.StorageFactory = &transformerStorageFactory{}
type transformerStorageFactory struct {
delegate serverstorage.StorageFactory
resourceTransformers encryptionconfig.ResourceTransformers
resourceTransformers storagevalue.ResourceTransformers
}
func (t *transformerStorageFactory) NewConfig(resource schema.GroupResource) (*storagebackend.ConfigForResource, error) {

View File

@ -20,7 +20,6 @@ import (
"fmt"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/features"
@ -28,6 +27,7 @@ import (
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/util/feature"
utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
@ -101,9 +101,6 @@ func (o *RecommendedOptions) AddFlags(fs *pflag.FlagSet) {
// ApplyTo adds RecommendedOptions to the server configuration.
// pluginInitializers can be empty; it is only needed for additional initializers.
func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error {
if err := o.Etcd.Complete(config.Config.StorageObjectCountTracker, config.Config.DrainedNotify(), config.Config.AddPostStartHook); err != nil {
return err
}
if err := o.Etcd.ApplyTo(&config.Config); err != nil {
return err
}
@ -131,9 +128,20 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error {
if err := o.CoreAPI.ApplyTo(config); err != nil {
return err
}
if initializers, err := o.ExtraAdmissionInitializers(config); err != nil {
initializers, err := o.ExtraAdmissionInitializers(config)
if err != nil {
return err
} else if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, config.ClientConfig, o.FeatureGate, initializers...); err != nil {
}
kubeClient, err := kubernetes.NewForConfig(config.ClientConfig)
if err != nil {
return err
}
dynamicClient, err := dynamic.NewForConfig(config.ClientConfig)
if err != nil {
return err
}
if err := o.Admission.ApplyTo(&config.Config, config.SharedInformerFactory, kubeClient, dynamicClient, o.FeatureGate,
initializers...); err != nil {
return err
}
if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) {

View File

@ -153,7 +153,7 @@ func (s *SecureServingOptions) AddFlags(fs *pflag.FlagSet) {
fs.IPVar(&s.BindAddress, "bind-address", s.BindAddress, ""+
"The IP address on which to listen for the --secure-port port. The "+
"associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+
"clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.")
"clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used.")
desc := "The port on which to serve HTTPS with authentication and authorization."
if s.Required {

View File

@ -22,6 +22,7 @@ import (
cachermetrics "k8s.io/apiserver/pkg/storage/cacher/metrics"
etcd3metrics "k8s.io/apiserver/pkg/storage/etcd3/metrics"
flowcontrolmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
peerproxymetrics "k8s.io/apiserver/pkg/util/peerproxy/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
@ -50,4 +51,5 @@ func register() {
cachermetrics.Register()
etcd3metrics.Register()
flowcontrolmetrics.Register()
peerproxymetrics.Register()
}

View File

@ -43,10 +43,7 @@ func (oa OpenAPI) InstallV2(c *restful.Container, mux *mux.PathRecorderMux) (*handler.OpenAPIService, *spec.Swagger) {
}
spec.Definitions = handler.PruneDefaults(spec.Definitions)
openAPIVersionedService := handler.NewOpenAPIService(spec)
err = openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux)
if err != nil {
klog.Fatalf("Failed to register versioned open api spec for root: %v", err)
}
openAPIVersionedService.RegisterOpenAPIVersionedService("/openapi/v2", mux)
return openAPIVersionedService, spec
}

View File

@ -291,28 +291,17 @@ func Configs(storageConfig storagebackend.Config) []storagebackend.Config {
// Returns all storage configurations including those for group resource overrides
func configs(storageConfig storagebackend.Config, grOverrides map[schema.GroupResource]groupResourceOverrides) []storagebackend.Config {
locations := sets.NewString()
configs := []storagebackend.Config{}
for _, loc := range storageConfig.Transport.ServerList {
// copy
newConfig := storageConfig
newConfig.Transport.ServerList = []string{loc}
configs = append(configs, newConfig)
locations.Insert(loc)
}
configs := []storagebackend.Config{storageConfig}
for _, override := range grOverrides {
for _, loc := range override.etcdLocation {
if locations.Has(loc) {
continue
}
// copy
newConfig := storageConfig
override.Apply(&newConfig, &StorageCodecConfig{})
newConfig.Transport.ServerList = []string{loc}
configs = append(configs, newConfig)
locations.Insert(loc)
if len(override.etcdLocation) == 0 {
continue
}
// copy
newConfig := storageConfig
override.Apply(&newConfig, &StorageCodecConfig{})
newConfig.Transport.ServerList = override.etcdLocation
configs = append(configs, newConfig)
}
return configs
}
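After this rewrite, configs emits the base configuration once and then one extra configuration per override that names its own etcd servers; overrides without servers share the base entry. A toy reconstruction with simplified types:

package main

import "fmt"

type Config struct {
	Servers []string
	Prefix  string
}

type override struct {
	servers []string
	prefix  string
}

// buildConfigs mirrors the shape of configs(): base first, then per-override copies.
func buildConfigs(base Config, overrides map[string]override) []Config {
	out := []Config{base}
	for _, o := range overrides {
		if len(o.servers) == 0 {
			continue // override tweaks codecs/prefix only; it shares the base servers
		}
		cfg := base // copy
		cfg.Prefix = o.prefix
		cfg.Servers = o.servers
		out = append(out, cfg)
	}
	return out
}

func main() {
	base := Config{Servers: []string{"https://etcd-0:2379"}, Prefix: "/registry"}
	cfgs := buildConfigs(base, map[string]override{
		"events": {servers: []string{"https://etcd-events:2379"}, prefix: "/registry"},
	})
	fmt.Println(len(cfgs), cfgs[1].Servers) // 2 [https://etcd-events:2379]
}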