rebase: update kubernetes to 1.26.1

update kubernetes and its dependencies
to v1.26.1

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Author: Madhu Rajanna
Date: 2023-02-01 18:06:36 +01:00
Committed by: mergify[bot]
Parent: e9e33fb851
Commit: 9c8de9471e
937 changed files with 75539 additions and 33050 deletions

View File

@ -29,6 +29,9 @@ import (
"google.golang.org/grpc"
"k8s.io/klog/v2"
"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics"
commonmetrics "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics"
"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"
)
@ -131,6 +134,9 @@ type grpcTunnel struct {
// closing should only be accessed through atomic methods.
// TODO: switch this to an atomic.Bool once the client is exclusively built with go1.19+
closing uint32
// Stores the current metrics.ClientConnectionStatus
prevStatus atomic.Value
}
type clientConn interface {
@ -139,6 +145,11 @@ type clientConn interface {
var _ clientConn = &grpc.ClientConn{}
var (
// Expose metrics for client to register.
Metrics = metrics.Metrics
)
// CreateSingleUseGrpcTunnel creates a Tunnel to dial to a remote server through a
// gRPC based proxy service.
// Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated
@ -177,7 +188,7 @@ func CreateSingleUseGrpcTunnelWithContext(createCtx, tunnelCtx context.Context,
}
func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *grpcTunnel {
return &grpcTunnel{
t := grpcTunnel{
stream: stream,
clientConn: c,
pendingDial: pendingDialManager{pendingDials: make(map[int64]pendingDial)},
@ -185,6 +196,36 @@ func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *g
readTimeoutSeconds: 10,
done: make(chan struct{}),
}
s := metrics.ClientConnectionStatusCreated
t.prevStatus.Store(s)
metrics.Metrics.GetClientConnectionsMetric().WithLabelValues(string(s)).Inc()
return &t
}
func (t *grpcTunnel) updateMetric(status metrics.ClientConnectionStatus) {
select {
case <-t.Done():
return
default:
}
prevStatus := t.prevStatus.Swap(status).(metrics.ClientConnectionStatus)
m := metrics.Metrics.GetClientConnectionsMetric()
m.WithLabelValues(string(prevStatus)).Dec()
m.WithLabelValues(string(status)).Inc()
}
// closeMetric should be called exactly once to finalize client_connections metric.
func (t *grpcTunnel) closeMetric() {
select {
case <-t.Done():
return
default:
}
prevStatus := t.prevStatus.Load().(metrics.ClientConnectionStatus)
metrics.Metrics.GetClientConnectionsMetric().WithLabelValues(string(prevStatus)).Dec()
}
func (t *grpcTunnel) serve(tunnelCtx context.Context) {
@ -196,19 +237,29 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) {
// close any channels remaining for these connections.
t.conns.closeAll()
t.closeMetric()
close(t.done)
}()
for {
pkt, err := t.stream.Recv()
if err == io.EOF || t.isClosing() {
if err == io.EOF {
return
}
const segment = commonmetrics.SegmentToClient
isClosing := t.isClosing()
if err != nil || pkt == nil {
klog.ErrorS(err, "stream read failure")
if !isClosing {
klog.ErrorS(err, "stream read failure")
}
metrics.Metrics.ObserveStreamErrorNoPacket(segment, err)
return
}
metrics.Metrics.ObservePacket(segment, pkt.Type)
if isClosing {
return
}
klog.V(5).InfoS("[tracing] recv packet", "type", pkt.Type)
switch pkt.Type {
@ -222,13 +273,19 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) {
// 2. grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context
//
// In either scenario, we should return here and close the tunnel as it is no longer needed.
klog.V(1).InfoS("DialResp not recognized; dropped", "connectionID", resp.ConnectID, "dialID", resp.Random)
kvs := []interface{}{"dialID", resp.Random, "connectID", resp.ConnectID}
if resp.Error != "" {
kvs = append(kvs, "error", resp.Error)
}
klog.V(1).InfoS("DialResp not recognized; dropped", kvs...)
return
}
result := dialResult{connid: resp.ConnectID}
if resp.Error != "" {
result.err = &dialFailure{resp.Error, DialFailureEndpoint}
result.err = &dialFailure{resp.Error, metrics.DialFailureEndpoint}
} else {
t.updateMetric(metrics.ClientConnectionStatusOk)
}
select {
// try to send to the result channel
@ -263,7 +320,7 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) {
klog.V(1).InfoS("DIAL_CLS after dial finished", "dialID", resp.Random)
} else {
result := dialResult{
err: &dialFailure{"dial closed", DialFailureDialClosed},
err: &dialFailure{"dial closed", metrics.DialFailureDialClosed},
}
select {
case pendingDial.resultCh <- result:
@ -316,6 +373,15 @@ func (t *grpcTunnel) serve(tunnelCtx context.Context) {
// Dial connects to the address on the named network, similar to
// what net.Dial does. The only supported protocol is tcp.
func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address string) (net.Conn, error) {
conn, err := t.dialContext(requestCtx, protocol, address)
if err != nil {
_, reason := GetDialFailureReason(err)
metrics.Metrics.ObserveDialFailure(reason)
}
return conn, err
}
func (t *grpcTunnel) dialContext(requestCtx context.Context, protocol, address string) (net.Conn, error) {
select {
case <-t.done:
return nil, errors.New("tunnel is closed")
@ -326,6 +392,8 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s
return nil, errors.New("protocol not supported")
}
t.updateMetric(metrics.ClientConnectionStatusDialing)
random := rand.Int63() /* #nosec G404 */
// This channel is closed once we're returning and no longer waiting on resultCh
@ -350,8 +418,11 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s
}
klog.V(5).InfoS("[tracing] send packet", "type", req.Type)
const segment = commonmetrics.SegmentFromClient
metrics.Metrics.ObservePacket(segment, req.Type)
err := t.stream.Send(req)
if err != nil {
metrics.Metrics.ObserveStreamError(segment, err, req.Type)
return nil, err
}
@ -375,14 +446,14 @@ func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address s
case <-time.After(30 * time.Second):
klog.V(5).InfoS("Timed out waiting for DialResp", "dialID", random)
go t.closeDial(random)
return nil, &dialFailure{"dial timeout, backstop", DialFailureTimeout}
return nil, &dialFailure{"dial timeout, backstop", metrics.DialFailureTimeout}
case <-requestCtx.Done():
klog.V(5).InfoS("Context canceled waiting for DialResp", "ctxErr", requestCtx.Err(), "dialID", random)
go t.closeDial(random)
return nil, &dialFailure{"dial timeout, context", DialFailureContext}
return nil, &dialFailure{"dial timeout, context", metrics.DialFailureContext}
case <-t.done:
klog.V(5).InfoS("Tunnel closed while waiting for DialResp", "dialID", random)
return nil, &dialFailure{"tunnel closed", DialFailureTunnelClosed}
return nil, &dialFailure{"tunnel closed", metrics.DialFailureTunnelClosed}
}
return c, nil
@ -402,7 +473,10 @@ func (t *grpcTunnel) closeDial(dialID int64) {
},
},
}
const segment = commonmetrics.SegmentFromClient
metrics.Metrics.ObservePacket(segment, req.Type)
if err := t.stream.Send(req); err != nil {
metrics.Metrics.ObserveStreamError(segment, err, req.Type)
klog.V(5).InfoS("Failed to send DIAL_CLS", "err", err, "dialID", dialID)
}
t.closeTunnel()
@ -417,38 +491,19 @@ func (t *grpcTunnel) isClosing() bool {
return atomic.LoadUint32(&t.closing) != 0
}
func GetDialFailureReason(err error) (isDialFailure bool, reason DialFailureReason) {
func GetDialFailureReason(err error) (isDialFailure bool, reason metrics.DialFailureReason) {
var df *dialFailure
if errors.As(err, &df) {
return true, df.reason
}
return false, DialFailureUnknown
return false, metrics.DialFailureUnknown
}
type dialFailure struct {
msg string
reason DialFailureReason
reason metrics.DialFailureReason
}
func (df *dialFailure) Error() string {
return df.msg
}
type DialFailureReason string
const (
DialFailureUnknown DialFailureReason = "unknown"
// DialFailureTimeout indicates the hard 30 second timeout was hit.
DialFailureTimeout DialFailureReason = "timeout"
// DialFailureContext indicates that the context was cancelled or reached its deadline before
// the dial response was returned.
DialFailureContext DialFailureReason = "context"
// DialFailureEndpoint indicates that the konnectivity-agent was unable to reach the backend endpoint.
DialFailureEndpoint DialFailureReason = "endpoint"
// DialFailureDialClosed indicates that the client received a CloseDial response, indicating the
// connection was closed before the dial could complete.
DialFailureDialClosed DialFailureReason = "dialclosed"
// DialFailureTunnelClosed indicates that the client connection was closed before the dial could
// complete.
DialFailureTunnelClosed DialFailureReason = "tunnelclosed"
)
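
Editorial usage sketch (not part of this vendored change): callers can still classify dial errors with GetDialFailureReason; the reason values now come from the metrics package. The helper name below is hypothetical.

package example

import (
	"k8s.io/klog/v2"

	"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client"
)

// logDialFailure is a hypothetical helper: when err came from a tunnel dial,
// it logs the metrics.DialFailureReason ("timeout", "endpoint", "tunnelclosed", ...).
func logDialFailure(err error) {
	if isDialFailure, reason := client.GetDialFailureReason(err); isDialFailure {
		klog.ErrorS(err, "konnectivity dial failed", "reason", reason)
	}
}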

View File

@ -23,6 +23,9 @@ import (
"time"
"k8s.io/klog/v2"
"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics"
commonmetrics "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics"
"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"
)
@ -62,8 +65,11 @@ func (c *conn) Write(data []byte) (n int, err error) {
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
const segment = commonmetrics.SegmentFromClient
metrics.Metrics.ObservePacket(segment, req.Type)
err = c.stream.Send(req)
if err != nil {
metrics.Metrics.ObserveStreamError(segment, err, req.Type)
return 0, err
}
return len(data), err
@ -147,7 +153,10 @@ func (c *conn) Close() error {
klog.V(5).InfoS("[tracing] send req", "type", req.Type)
const segment = commonmetrics.SegmentFromClient
metrics.Metrics.ObservePacket(segment, req.Type)
if err := c.stream.Send(req); err != nil {
metrics.Metrics.ObserveStreamError(segment, err, req.Type)
return err
}

View File

@ -0,0 +1,162 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
commonmetrics "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics"
"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"
)
const (
Namespace = "konnectivity_network_proxy"
Subsystem = "client"
)
var (
// Metrics provides access to all client metrics. The client
// application is responsible for registering (via Metrics.RegisterMetrics).
Metrics = newMetrics()
)
// ClientMetrics includes all the metrics of the konnectivity-client.
type ClientMetrics struct {
registerOnce sync.Once
streamPackets *prometheus.CounterVec
streamErrors *prometheus.CounterVec
dialFailures *prometheus.CounterVec
clientConns *prometheus.GaugeVec
}
type DialFailureReason string
const (
DialFailureUnknown DialFailureReason = "unknown"
// DialFailureTimeout indicates the hard 30 second timeout was hit.
DialFailureTimeout DialFailureReason = "timeout"
// DialFailureContext indicates that the context was cancelled or reached its deadline before
// the dial response was returned.
DialFailureContext DialFailureReason = "context"
// DialFailureEndpoint indicates that the konnectivity-agent was unable to reach the backend endpoint.
DialFailureEndpoint DialFailureReason = "endpoint"
// DialFailureDialClosed indicates that the client received a CloseDial response, indicating the
// connection was closed before the dial could complete.
DialFailureDialClosed DialFailureReason = "dialclosed"
// DialFailureTunnelClosed indicates that the client connection was closed before the dial could
// complete.
DialFailureTunnelClosed DialFailureReason = "tunnelclosed"
)
type ClientConnectionStatus string
const (
// The connection is created but has not yet been dialed.
ClientConnectionStatusCreated ClientConnectionStatus = "created"
// The connection is pending dial response.
ClientConnectionStatusDialing ClientConnectionStatus = "dialing"
// The connection is established.
ClientConnectionStatusOk ClientConnectionStatus = "ok"
// The connection is closing.
ClientConnectionStatusClosing ClientConnectionStatus = "closing"
)
func newMetrics() *ClientMetrics {
// The denominator (total dials started) for both
// dial_failure_total and dial_duration_seconds is the
// stream_packets_total (common metric), where segment is
// "from_client" and packet_type is "DIAL_REQ".
dialFailures := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: Namespace,
Subsystem: Subsystem,
Name: "dial_failure_total",
Help: "Number of dial failures observed, by reason (example: remote endpoint error)",
},
[]string{
"reason",
},
)
clientConns := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: Namespace,
Subsystem: Subsystem,
Name: "client_connections",
Help: "Number of open client connections, by status (Example: dialing)",
},
[]string{
"status",
},
)
return &ClientMetrics{
streamPackets: commonmetrics.MakeStreamPacketsTotalMetric(Namespace, Subsystem),
streamErrors: commonmetrics.MakeStreamErrorsTotalMetric(Namespace, Subsystem),
dialFailures: dialFailures,
clientConns: clientConns,
}
}
// RegisterMetrics registers all metrics with the client application.
func (c *ClientMetrics) RegisterMetrics(r prometheus.Registerer) {
c.registerOnce.Do(func() {
r.MustRegister(c.streamPackets)
r.MustRegister(c.streamErrors)
r.MustRegister(c.dialFailures)
r.MustRegister(c.clientConns)
})
}
// LegacyRegisterMetrics registers all metrics via MustRegister func.
// TODO: remove this once https://github.com/kubernetes/kubernetes/pull/114293 is available.
func (c *ClientMetrics) LegacyRegisterMetrics(mustRegisterFn func(...prometheus.Collector)) {
c.registerOnce.Do(func() {
mustRegisterFn(c.streamPackets)
mustRegisterFn(c.streamErrors)
mustRegisterFn(c.dialFailures)
mustRegisterFn(c.clientConns)
})
}
// Reset resets the metrics.
func (c *ClientMetrics) Reset() {
c.streamPackets.Reset()
c.streamErrors.Reset()
c.dialFailures.Reset()
c.clientConns.Reset()
}
func (c *ClientMetrics) ObserveDialFailure(reason DialFailureReason) {
c.dialFailures.WithLabelValues(string(reason)).Inc()
}
func (c *ClientMetrics) GetClientConnectionsMetric() *prometheus.GaugeVec {
return c.clientConns
}
func (c *ClientMetrics) ObservePacket(segment commonmetrics.Segment, packetType client.PacketType) {
commonmetrics.ObservePacket(c.streamPackets, segment, packetType)
}
func (c *ClientMetrics) ObserveStreamErrorNoPacket(segment commonmetrics.Segment, err error) {
commonmetrics.ObserveStreamErrorNoPacket(c.streamErrors, segment, err)
}
func (c *ClientMetrics) ObserveStreamError(segment commonmetrics.Segment, err error, packetType client.PacketType) {
commonmetrics.ObserveStreamError(c.streamErrors, segment, err, packetType)
}
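
For context, an editorial sketch (not part of this change): the konnectivity client does not register these collectors itself; a consuming application is expected to call RegisterMetrics once, for example against the default Prometheus registerer.

package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics"
)

// registerKonnectivityClientMetrics wires the client metrics into the default
// registry; RegisterMetrics is guarded by a sync.Once, so repeat calls are no-ops.
func registerKonnectivityClientMetrics() {
	metrics.Metrics.RegisterMetrics(prometheus.DefaultRegisterer)
}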

View File

@ -0,0 +1,78 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package metrics provides metric definitions and helpers used
// across konnectivity client, server, and agent.
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc/status"
"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"
)
// Segment identifies one of four tunnel segments (e.g. from server to agent).
type Segment string
const (
// SegmentFromClient indicates a packet from client to server.
SegmentFromClient Segment = "from_client"
// SegmentToClient indicates a packet from server to client.
SegmentToClient Segment = "to_client"
// SegmentFromAgent indicates a packet from agent to server.
SegmentFromAgent Segment = "from_agent"
// SegmentToAgent indicates a packet from server to agent.
SegmentToAgent Segment = "to_agent"
)
func MakeStreamPacketsTotalMetric(namespace, subsystem string) *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "stream_packets_total",
Help: "Count of packets processed, by segment and packet type (example: from_client, DIAL_REQ)",
},
[]string{"segment", "packet_type"},
)
}
func MakeStreamErrorsTotalMetric(namespace, subsystem string) *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "stream_errors_total",
Help: "Count of gRPC stream errors, by segment, grpc Code, packet type. (example: from_agent, Code.Unavailable, DIAL_RSP)",
},
[]string{"segment", "code", "packet_type"},
)
}
func ObservePacket(m *prometheus.CounterVec, segment Segment, packetType client.PacketType) {
m.WithLabelValues(string(segment), packetType.String()).Inc()
}
func ObserveStreamErrorNoPacket(m *prometheus.CounterVec, segment Segment, err error) {
code := status.Code(err)
m.WithLabelValues(string(segment), code.String(), "Unknown").Inc()
}
func ObserveStreamError(m *prometheus.CounterVec, segment Segment, err error, packetType client.PacketType) {
code := status.Code(err)
m.WithLabelValues(string(segment), code.String(), packetType.String()).Inc()
}
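
An editorial sketch of how another component (here a hypothetical agent) might reuse these shared helpers; the namespace and subsystem values mirror the client's convention and are assumptions, not part of this change.

package example

import (
	"github.com/prometheus/client_golang/prometheus"

	commonmetrics "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics"
	"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client"
)

// agentPackets is a hypothetical agent-side counter built from the shared helper.
var agentPackets = commonmetrics.MakeStreamPacketsTotalMetric("konnectivity_network_proxy", "agent")

func init() {
	prometheus.MustRegister(agentPackets)
}

// recordDialResponseSent counts a DIAL_RSP packet flowing from the agent.
func recordDialResponseSent() {
	commonmetrics.ObservePacket(agentPackets, commonmetrics.SegmentFromAgent, client.PacketType_DIAL_RSP)
}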

View File

@ -19,14 +19,18 @@ package cache
import (
"context"
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
toolscache "k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/cache/internal"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@ -74,11 +78,19 @@ type Informer interface {
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
// period. Events to a single handler are delivered sequentially, but there is no coordination
// between different handlers.
AddEventHandler(handler toolscache.ResourceEventHandler)
// It returns a registration handle for the handler that can be used to remove
// the handler again.
AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error)
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
// specified resync period. Events to a single handler are delivered sequentially, but there is
// no coordination between different handlers.
AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration)
// It returns a registration handle for the handler that can be used to remove
// the handler again and an error if the handler cannot be added.
AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error)
// RemoveEventHandler removes a formerly added event handler given by
// its registration handle.
// This function is guaranteed to be idempotent, and thread-safe.
RemoveEventHandler(handle toolscache.ResourceEventHandlerRegistration) error
// AddIndexers adds more indexers to this store. If you call this after you already have data
// in the store, the results are undefined.
AddIndexers(indexers toolscache.Indexers) error
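
The handle-based registration mirrors the SharedInformer API added in client-go 1.26. A usage sketch (editorial; the Pod informer and the handler body are assumptions):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	toolscache "k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// watchPodsBriefly adds an event handler and later removes it again through
// the registration handle returned by AddEventHandler.
func watchPodsBriefly(ctx context.Context, c cache.Cache) error {
	informer, err := c.GetInformer(ctx, &corev1.Pod{})
	if err != nil {
		return err
	}
	reg, err := informer.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to new Pods */ },
	})
	if err != nil {
		return err
	}
	// ... once the handler is no longer needed:
	return informer.RemoveEventHandler(reg)
}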
@ -128,6 +140,18 @@ type Options struct {
// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
// otherwise you will mutate the object in the cache.
UnsafeDisableDeepCopyByObject DisableDeepCopyByObject
// TransformByObject is a map from GVKs to transformer functions which
// get applied when objects of the transformation are about to be committed
// to cache.
//
// This function is called both for new objects to enter the cache,
// and for updated objects.
TransformByObject TransformByObject
// DefaultTransform is the transform used for all GVKs which do
// not have an explicit transform func set in TransformByObject
DefaultTransform toolscache.TransformFunc
}
var defaultResyncTime = 10 * time.Hour
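
An editorial sketch of the new transform knobs, assuming a manager wired through BuilderWithOptions; stripping managedFields is only an illustrative transform, not something this change prescribes.

package example

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// dropManagedFields trims objects before they are committed to the cache.
func dropManagedFields(in interface{}) (interface{}, error) {
	if obj, ok := in.(client.Object); ok {
		obj.SetManagedFields(nil)
	}
	return in, nil
}

func newManager() (manager.Manager, error) {
	cfg, err := ctrl.GetConfig()
	if err != nil {
		return nil, err
	}
	return manager.New(cfg, manager.Options{
		NewCache: cache.BuilderWithOptions(cache.Options{
			// Per-type transform for Pods, with a fallback for every other GVK.
			TransformByObject: cache.TransformByObject{&corev1.Pod{}: dropManagedFields},
			DefaultTransform:  dropManagedFields,
		}),
	})
}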
@ -138,7 +162,7 @@ func New(config *rest.Config, opts Options) (Cache, error) {
if err != nil {
return nil, err
}
selectorsByGVK, err := convertToSelectorsByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme)
selectorsByGVK, err := convertToByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme)
if err != nil {
return nil, err
}
@ -146,36 +170,234 @@ func New(config *rest.Config, opts Options) (Cache, error) {
if err != nil {
return nil, err
}
im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, selectorsByGVK, disableDeepCopyByGVK)
transformByGVK, err := convertToByGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme)
if err != nil {
return nil, err
}
transformByObj := internal.TransformFuncByObjectFromMap(transformByGVK)
internalSelectorsByGVK := internal.SelectorsByGVK{}
for gvk, selector := range selectorsByGVK {
internalSelectorsByGVK[gvk] = internal.Selector(selector)
}
im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, internalSelectorsByGVK, disableDeepCopyByGVK, transformByObj)
return &informerCache{InformersMap: im}, nil
}
// BuilderWithOptions returns a Cache constructor that will build the a cache
// BuilderWithOptions returns a Cache constructor that will build a cache
// honoring the options argument, this is useful to specify options like
// SelectorsByObject
// WARNING: if SelectorsByObject is specified. filtered out resources are not
// returned.
// WARNING: if UnsafeDisableDeepCopy is enabled, you must DeepCopy any object
// returned from cache get/list before mutating it.
// WARNING: If SelectorsByObject is specified, filtered out resources are not
// returned.
// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object
// returned from cache get/list before mutating it.
func BuilderWithOptions(options Options) NewCacheFunc {
return func(config *rest.Config, opts Options) (Cache, error) {
if options.Scheme == nil {
options.Scheme = opts.Scheme
return func(config *rest.Config, inherited Options) (Cache, error) {
var err error
inherited, err = defaultOpts(config, inherited)
if err != nil {
return nil, err
}
if options.Mapper == nil {
options.Mapper = opts.Mapper
options, err = defaultOpts(config, options)
if err != nil {
return nil, err
}
if options.Resync == nil {
options.Resync = opts.Resync
}
if options.Namespace == "" {
options.Namespace = opts.Namespace
}
if opts.Resync == nil {
opts.Resync = options.Resync
combined, err := options.inheritFrom(inherited)
if err != nil {
return nil, err
}
return New(config, *combined)
}
}
return New(config, options)
func (options Options) inheritFrom(inherited Options) (*Options, error) {
var (
combined Options
err error
)
combined.Scheme = combineScheme(inherited.Scheme, options.Scheme)
combined.Mapper = selectMapper(inherited.Mapper, options.Mapper)
combined.Resync = selectResync(inherited.Resync, options.Resync)
combined.Namespace = selectNamespace(inherited.Namespace, options.Namespace)
combined.SelectorsByObject, combined.DefaultSelector, err = combineSelectors(inherited, options, combined.Scheme)
if err != nil {
return nil, err
}
combined.UnsafeDisableDeepCopyByObject, err = combineUnsafeDeepCopy(inherited, options, combined.Scheme)
if err != nil {
return nil, err
}
combined.TransformByObject, combined.DefaultTransform, err = combineTransforms(inherited, options, combined.Scheme)
if err != nil {
return nil, err
}
return &combined, nil
}
func combineScheme(schemes ...*runtime.Scheme) *runtime.Scheme {
var out *runtime.Scheme
for _, sch := range schemes {
if sch == nil {
continue
}
for gvk, t := range sch.AllKnownTypes() {
if out == nil {
out = runtime.NewScheme()
}
out.AddKnownTypeWithName(gvk, reflect.New(t).Interface().(runtime.Object))
}
}
return out
}
func selectMapper(def, override meta.RESTMapper) meta.RESTMapper {
if override != nil {
return override
}
return def
}
func selectResync(def, override *time.Duration) *time.Duration {
if override != nil {
return override
}
return def
}
func selectNamespace(def, override string) string {
if override != "" {
return override
}
return def
}
func combineSelectors(inherited, options Options, scheme *runtime.Scheme) (SelectorsByObject, ObjectSelector, error) {
// Selectors are combined via logical AND.
// - Combined label selector is a union of the selectors requirements from both sets of options.
// - Combined field selector uses fields.AndSelectors with the combined list of non-nil field selectors
// defined in both sets of options.
//
// There is a bunch of complexity here because we need to convert to SelectorsByGVK
// to be able to match keys between options and inherited and then convert back to SelectorsByObject
optionsSelectorsByGVK, err := convertToByGVK(options.SelectorsByObject, options.DefaultSelector, scheme)
if err != nil {
return nil, ObjectSelector{}, err
}
inheritedSelectorsByGVK, err := convertToByGVK(inherited.SelectorsByObject, inherited.DefaultSelector, inherited.Scheme)
if err != nil {
return nil, ObjectSelector{}, err
}
for gvk, inheritedSelector := range inheritedSelectorsByGVK {
optionsSelectorsByGVK[gvk] = combineSelector(inheritedSelector, optionsSelectorsByGVK[gvk])
}
return convertToByObject(optionsSelectorsByGVK, scheme)
}
func combineSelector(selectors ...ObjectSelector) ObjectSelector {
ls := make([]labels.Selector, 0, len(selectors))
fs := make([]fields.Selector, 0, len(selectors))
for _, s := range selectors {
ls = append(ls, s.Label)
fs = append(fs, s.Field)
}
return ObjectSelector{
Label: combineLabelSelectors(ls...),
Field: combineFieldSelectors(fs...),
}
}
func combineLabelSelectors(ls ...labels.Selector) labels.Selector {
var combined labels.Selector
for _, l := range ls {
if l == nil {
continue
}
if combined == nil {
combined = labels.NewSelector()
}
reqs, _ := l.Requirements()
combined = combined.Add(reqs...)
}
return combined
}
func combineFieldSelectors(fs ...fields.Selector) fields.Selector {
nonNil := fs[:0]
for _, f := range fs {
if f == nil {
continue
}
nonNil = append(nonNil, f)
}
if len(nonNil) == 0 {
return nil
}
if len(nonNil) == 1 {
return nonNil[0]
}
return fields.AndSelectors(nonNil...)
}
func combineUnsafeDeepCopy(inherited, options Options, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) {
// UnsafeDisableDeepCopyByObject is combined via precedence. Only if a value for a particular GVK is unset
// in options will a value from inherited be used.
optionsDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(options.UnsafeDisableDeepCopyByObject, options.Scheme)
if err != nil {
return nil, err
}
inheritedDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(inherited.UnsafeDisableDeepCopyByObject, inherited.Scheme)
if err != nil {
return nil, err
}
for gvk, inheritedDeepCopy := range inheritedDisableDeepCopyByGVK {
if _, ok := optionsDisableDeepCopyByGVK[gvk]; !ok {
if optionsDisableDeepCopyByGVK == nil {
optionsDisableDeepCopyByGVK = map[schema.GroupVersionKind]bool{}
}
optionsDisableDeepCopyByGVK[gvk] = inheritedDeepCopy
}
}
return convertToDisableDeepCopyByObject(optionsDisableDeepCopyByGVK, scheme)
}
func combineTransforms(inherited, options Options, scheme *runtime.Scheme) (TransformByObject, toolscache.TransformFunc, error) {
// Transform functions are combined via chaining. If both inherited and options define a transform
// function, the transform function from inherited will be called first, and the transform function from
// options will be called second.
optionsTransformByGVK, err := convertToByGVK(options.TransformByObject, options.DefaultTransform, options.Scheme)
if err != nil {
return nil, nil, err
}
inheritedTransformByGVK, err := convertToByGVK(inherited.TransformByObject, inherited.DefaultTransform, inherited.Scheme)
if err != nil {
return nil, nil, err
}
for gvk, inheritedTransform := range inheritedTransformByGVK {
if optionsTransformByGVK == nil {
optionsTransformByGVK = map[schema.GroupVersionKind]toolscache.TransformFunc{}
}
optionsTransformByGVK[gvk] = combineTransform(inheritedTransform, optionsTransformByGVK[gvk])
}
return convertToByObject(optionsTransformByGVK, scheme)
}
func combineTransform(inherited, current toolscache.TransformFunc) toolscache.TransformFunc {
if inherited == nil {
return current
}
if current == nil {
return inherited
}
return func(in interface{}) (interface{}, error) {
mid, err := inherited(in)
if err != nil {
return nil, err
}
return current(mid)
}
}
@ -202,17 +424,40 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
return opts, nil
}
func convertToSelectorsByGVK(selectorsByObject SelectorsByObject, defaultSelector ObjectSelector, scheme *runtime.Scheme) (internal.SelectorsByGVK, error) {
selectorsByGVK := internal.SelectorsByGVK{}
for object, selector := range selectorsByObject {
func convertToByGVK[T any](byObject map[client.Object]T, def T, scheme *runtime.Scheme) (map[schema.GroupVersionKind]T, error) {
byGVK := map[schema.GroupVersionKind]T{}
for object, value := range byObject {
gvk, err := apiutil.GVKForObject(object, scheme)
if err != nil {
return nil, err
}
selectorsByGVK[gvk] = internal.Selector(selector)
byGVK[gvk] = value
}
selectorsByGVK[schema.GroupVersionKind{}] = internal.Selector(defaultSelector)
return selectorsByGVK, nil
byGVK[schema.GroupVersionKind{}] = def
return byGVK, nil
}
func convertToByObject[T any](byGVK map[schema.GroupVersionKind]T, scheme *runtime.Scheme) (map[client.Object]T, T, error) {
var byObject map[client.Object]T
def := byGVK[schema.GroupVersionKind{}]
for gvk, value := range byGVK {
if gvk == (schema.GroupVersionKind{}) {
continue
}
obj, err := scheme.New(gvk)
if err != nil {
return nil, def, err
}
cObj, ok := obj.(client.Object)
if !ok {
return nil, def, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk)
}
if byObject == nil {
byObject = map[client.Object]T{}
}
byObject[cObj] = value
}
return byObject, def, nil
}
// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache.
@ -241,3 +486,31 @@ func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObje
}
return disableDeepCopyByGVK, nil
}
func convertToDisableDeepCopyByObject(byGVK internal.DisableDeepCopyByGVK, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) {
var byObject DisableDeepCopyByObject
for gvk, value := range byGVK {
if byObject == nil {
byObject = DisableDeepCopyByObject{}
}
if gvk == (schema.GroupVersionKind{}) {
byObject[ObjectAll{}] = value
continue
}
obj, err := scheme.New(gvk)
if err != nil {
return nil, err
}
cObj, ok := obj.(client.Object)
if !ok {
return nil, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk)
}
byObject[cObj] = value
}
return byObject, nil
}
// TransformByObject associate a client.Object's GVK to a transformer function
// to be applied when storing the object into the cache.
type TransformByObject map[client.Object]toolscache.TransformFunc

View File

@ -51,7 +51,7 @@ type informerCache struct {
}
// Get implements Reader.
func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error {
func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
gvk, err := apiutil.GVKForObject(out, ip.Scheme)
if err != nil {
return err
@ -96,11 +96,11 @@ func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schem
return nil, nil, err
}
if !strings.HasSuffix(gvk.Kind, "List") {
return nil, nil, fmt.Errorf("non-list type %T (kind %q) passed as output", list, gvk)
}
// we need the non-list GVK, so chop off the "List" from the end of the kind
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
_, isUnstructured := list.(*unstructured.UnstructuredList)
var cacheTypeObj runtime.Object
if isUnstructured {
@ -193,8 +193,8 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc)
rawVals := extractor(obj)
var vals []string
if ns == "" {
// if we're not doubling the keys for the namespaced case, just re-use what was returned to us
vals = rawVals
// if we're not doubling the keys for the namespaced case, just create a new slice with same length
vals = make([]string, len(rawVals))
} else {
// if we need to add non-namespaced versions too, double the length
vals = make([]string, len(rawVals)*2)

View File

@ -23,12 +23,11 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/internal/field/selector"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@ -54,7 +53,7 @@ type CacheReader struct {
}
// Get checks the indexer for the object and writes a copy of it if found.
func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error {
func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
if c.scopeName == apimeta.RESTScopeNameRoot {
key.Namespace = ""
}
@ -116,7 +115,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli
case listOpts.FieldSelector != nil:
// TODO(directxman12): support more complicated field selectors by
// combining multiple indices, GetIndexers, etc
field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector)
field, val, requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector)
if !requiresExact {
return fmt.Errorf("non-exact field matches are not supported by the cache")
}
@ -162,7 +161,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli
}
var outObj runtime.Object
if c.disableDeepCopy {
if c.disableDeepCopy || (listOpts.UnsafeDisableDeepCopy != nil && *listOpts.UnsafeDisableDeepCopy) {
// skip deep copy which might be unsafe
// you must DeepCopy any object before mutating it outside
outObj = obj
@ -186,19 +185,6 @@ func objectKeyToStoreKey(k client.ObjectKey) string {
return k.Namespace + "/" + k.Name
}
// requiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`.
func requiresExactMatch(sel fields.Selector) (field, val string, required bool) {
reqs := sel.Requirements()
if len(reqs) != 1 {
return "", "", false
}
req := reqs[0]
if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals {
return "", "", false
}
return req.Field, req.Value, true
}
// FieldIndexName constructs the name of the index over the given field,
// for use with an indexer.
func FieldIndexName(field string) string {

View File

@ -52,11 +52,12 @@ func NewInformersMap(config *rest.Config,
namespace string,
selectors SelectorsByGVK,
disableDeepCopy DisableDeepCopyByGVK,
transformers TransformFuncByObject,
) *InformersMap {
return &InformersMap{
structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy),
unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy),
metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy),
structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers),
Scheme: scheme,
}
@ -108,18 +109,18 @@ func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj
// newStructuredInformersMap creates a new InformersMap for structured objects.
func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, createStructuredListWatch)
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch)
}
// newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, createUnstructuredListWatch)
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch)
}
// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, createMetadataListWatch)
namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch)
}

View File

@ -54,7 +54,9 @@ func newSpecificInformersMap(config *rest.Config,
namespace string,
selectors SelectorsByGVK,
disableDeepCopy DisableDeepCopyByGVK,
createListWatcher createListWatcherFunc) *specificInformersMap {
transformers TransformFuncByObject,
createListWatcher createListWatcherFunc,
) *specificInformersMap {
ip := &specificInformersMap{
config: config,
Scheme: scheme,
@ -68,6 +70,7 @@ func newSpecificInformersMap(config *rest.Config,
namespace: namespace,
selectors: selectors.forGVK,
disableDeepCopy: disableDeepCopy,
transformers: transformers,
}
return ip
}
@ -135,6 +138,9 @@ type specificInformersMap struct {
// disableDeepCopy indicates not to deep copy objects during get or list objects.
disableDeepCopy DisableDeepCopyByGVK
// transform funcs are applied to objects before they are committed to the cache
transformers TransformFuncByObject
}
// Start calls Run on each of the informers and sets started to true. Blocks on the context.
@ -227,6 +233,12 @@ func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, ob
ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{
cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
})
// Check to see if there is a transformer for this gvk
if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil {
return nil, false, err
}
rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return nil, false, err
@ -409,41 +421,31 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM
}, nil
}
type gvkFixupWatcher struct {
watcher watch.Interface
ch chan watch.Event
gvk schema.GroupVersionKind
wg sync.WaitGroup
}
// newGVKFixupWatcher adds a wrapper that preserves the GVK information when
// events come in.
//
// This works around a bug where GVK information is not passed into mapping
// functions when using the OnlyMetadata option in the builder.
// This issue is most likely caused by kubernetes/kubernetes#80609.
// See kubernetes-sigs/controller-runtime#1484.
//
// This was originally implemented as a cache.ResourceEventHandler wrapper but
// that contained a data race which was resolved by setting the GVK in a watch
// wrapper, before the objects are written to the cache.
// See kubernetes-sigs/controller-runtime#1650.
//
// The original watch wrapper was found to be incompatible with
// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a
// watch.Filter which is compatible.
// See kubernetes-sigs/controller-runtime#1789.
func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface {
ch := make(chan watch.Event)
w := &gvkFixupWatcher{
gvk: gvk,
watcher: watcher,
ch: ch,
}
w.wg.Add(1)
go w.run()
return w
}
func (w *gvkFixupWatcher) run() {
for e := range w.watcher.ResultChan() {
e.Object.GetObjectKind().SetGroupVersionKind(w.gvk)
w.ch <- e
}
w.wg.Done()
}
func (w *gvkFixupWatcher) Stop() {
w.watcher.Stop()
w.wg.Wait()
close(w.ch)
}
func (w *gvkFixupWatcher) ResultChan() <-chan watch.Event {
return w.ch
return watch.Filter(
watcher,
func(in watch.Event) (watch.Event, bool) {
in.Object.GetObjectKind().SetGroupVersionKind(gvk)
return in, true
},
)
}
// resyncPeriod returns a function which generates a duration each time it is

View File

@ -0,0 +1,55 @@
package internal
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// TransformFuncByObject provides access to the correct transform function for
// any given GVK.
type TransformFuncByObject interface {
Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error
Get(schema.GroupVersionKind) cache.TransformFunc
SetDefault(transformer cache.TransformFunc)
}
type transformFuncByGVK struct {
defaultTransform cache.TransformFunc
transformers map[schema.GroupVersionKind]cache.TransformFunc
}
// TransformFuncByObjectFromMap creates a TransformFuncByObject from a map that
// maps GVKs to TransformFuncs.
func TransformFuncByObjectFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByObject {
byGVK := &transformFuncByGVK{}
if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault {
byGVK.defaultTransform = defaultFunc
}
delete(in, schema.GroupVersionKind{})
byGVK.transformers = in
return byGVK
}
func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) {
t.defaultTransform = transformer
}
func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error {
gvk, err := apiutil.GVKForObject(obj, scheme)
if err != nil {
return err
}
t.transformers[gvk] = transformer
return nil
}
func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc {
if val, ok := t.transformers[gvk]; ok {
return val
}
return t.defaultTransform
}

View File

@ -55,7 +55,7 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc {
// create a cache for cluster scoped resources
gCache, err := New(config, opts)
if err != nil {
return nil, fmt.Errorf("error creating global cache %v", err)
return nil, fmt.Errorf("error creating global cache: %w", err)
}
for _, ns := range namespaces {
@ -200,7 +200,7 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object,
return nil
}
func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper)
if err != nil {
return err
@ -296,17 +296,47 @@ type multiNamespaceInformer struct {
var _ Informer = &multiNamespaceInformer{}
// AddEventHandler adds the handler to each namespaced informer.
func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) {
for _, informer := range i.namespaceToInformer {
informer.AddEventHandler(handler)
func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) {
handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))
for ns, informer := range i.namespaceToInformer {
registration, err := informer.AddEventHandler(handler)
if err != nil {
return nil, err
}
handles[ns] = registration
}
return handles, nil
}
// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer.
func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) {
for _, informer := range i.namespaceToInformer {
informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod)
func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) {
handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))
for ns, informer := range i.namespaceToInformer {
registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod)
if err != nil {
return nil, err
}
handles[ns] = registration
}
return handles, nil
}
// RemoveEventHandler removes a formerly added event handler given by its registration handle.
func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error {
handles, ok := h.(map[string]toolscache.ResourceEventHandlerRegistration)
if !ok {
return fmt.Errorf("it is not the registration returned by multiNamespaceInformer")
}
for ns, informer := range i.namespaceToInformer {
registration, ok := handles[ns]
if !ok {
continue
}
if err := informer.RemoveEventHandler(registration); err != nil {
return err
}
}
return nil
}
// AddIndexers adds the indexer for each namespaced informer.

View File

@ -22,6 +22,7 @@ import (
"sync"
"github.com/fsnotify/fsnotify"
"sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
@ -116,8 +117,10 @@ func (cw *CertWatcher) Watch() {
// and updates the current certificate on the watcher. If a callback is set, it
// is invoked with the new certificate.
func (cw *CertWatcher) ReadCertificate() error {
metrics.ReadCertificateTotal.Inc()
cert, err := tls.LoadX509KeyPair(cw.certPath, cw.keyPath)
if err != nil {
metrics.ReadCertificateErrors.Inc()
return err
}

View File

@ -0,0 +1,45 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
var (
// ReadCertificateTotal is a prometheus counter metrics which holds the total
// number of certificate reads.
ReadCertificateTotal = prometheus.NewCounter(prometheus.CounterOpts{
Name: "certwatcher_read_certificate_total",
Help: "Total number of certificate reads",
})
// ReadCertificateErrors is a prometheus counter metrics which holds the total
// number of errors from certificate read.
ReadCertificateErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "certwatcher_read_certificate_errors_total",
Help: "Total number of certificate read errors",
})
)
func init() {
metrics.Registry.MustRegister(
ReadCertificateTotal,
ReadCertificateErrors,
)
}
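
These collectors go into controller-runtime's global metrics.Registry, so a manager's metrics endpoint exposes them without extra wiring. An editorial sketch of the certwatcher side (paths are placeholders; New performs the initial ReadCertificate, which now increments certwatcher_read_certificate_total):

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

// startCertWatcher reads the key pair once, then keeps re-reading it on file
// changes until ctx is cancelled; each read updates the new counters.
func startCertWatcher(ctx context.Context) error {
	cw, err := certwatcher.New("/var/run/tls/tls.crt", "/var/run/tls/tls.key")
	if err != nil {
		return err
	}
	return cw.Start(ctx)
}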

View File

@ -81,7 +81,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi
// (unstructured, partial, etc)
// check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds
_, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort
_, isPartial := obj.(*metav1.PartialObjectMetadata)
_, isPartialList := obj.(*metav1.PartialObjectMetadataList)
if isPartial || isPartialList {
// we require that the GVK be populated in order to recognize the object

View File

@ -17,8 +17,8 @@ limitations under the License.
package apiutil
import (
"errors"
"sync"
"sync/atomic"
"golang.org/x/time/rate"
"k8s.io/apimachinery/pkg/api/meta"
@ -38,7 +38,8 @@ type dynamicRESTMapper struct {
lazy bool
// Used for lazy init.
initOnce sync.Once
inited uint32
initMtx sync.Mutex
}
// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper.
@ -125,18 +126,25 @@ func (drm *dynamicRESTMapper) setStaticMapper() error {
// init initializes drm only once if drm is lazy.
func (drm *dynamicRESTMapper) init() (err error) {
drm.initOnce.Do(func() {
if drm.lazy {
err = drm.setStaticMapper()
// skip init if drm is not lazy or has initialized
if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 {
return nil
}
drm.initMtx.Lock()
defer drm.initMtx.Unlock()
if drm.inited == 0 {
if err = drm.setStaticMapper(); err == nil {
atomic.StoreUint32(&drm.inited, 1)
}
})
}
return err
}
// checkAndReload attempts to call the given callback, which is assumed to be dependent
// on the data in the restmapper.
//
// If the callback returns an error that matches the given error, it will attempt to reload
// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload
// the RESTMapper's data and re-call the callback once that's occurred.
// If the callback returns any other error, the function will return immediately regardless.
//
@ -145,7 +153,7 @@ func (drm *dynamicRESTMapper) init() (err error) {
// the callback.
// It's thread-safe, and worries about thread-safety for the callback (so the callback does
// not need to attempt to lock the restmapper).
func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsReload func() error) error {
func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error {
// first, check the common path -- data is fresh enough
// (use an IIFE for the lock's defer)
err := func() error {
@ -155,10 +163,7 @@ func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsRel
return checkNeedsReload()
}()
// NB(directxman12): `Is` and `As` have a confusing relationship --
// `Is` is like `== or does this implement .Is`, whereas `As` says
// `can I type-assert into`
needsReload := errors.As(err, &needsReloadErr)
needsReload := meta.IsNoMatchError(err)
if !needsReload {
return err
}
@ -169,7 +174,7 @@ func (drm *dynamicRESTMapper) checkAndReload(needsReloadErr error, checkNeedsRel
// ... and double-check that we didn't reload in the meantime
err = checkNeedsReload()
needsReload = errors.As(err, &needsReloadErr)
needsReload = meta.IsNoMatchError(err)
if !needsReload {
return err
}
@ -197,7 +202,7 @@ func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (sch
return schema.GroupVersionKind{}, err
}
var gvk schema.GroupVersionKind
err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
gvk, err = drm.staticMapper.KindFor(resource)
return err
@ -210,7 +215,7 @@ func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]
return nil, err
}
var gvks []schema.GroupVersionKind
err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
gvks, err = drm.staticMapper.KindsFor(resource)
return err
@ -224,7 +229,7 @@ func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (sc
}
var gvr schema.GroupVersionResource
err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
gvr, err = drm.staticMapper.ResourceFor(input)
return err
@ -237,7 +242,7 @@ func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([
return nil, err
}
var gvrs []schema.GroupVersionResource
err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
gvrs, err = drm.staticMapper.ResourcesFor(input)
return err
@ -250,7 +255,7 @@ func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...strin
return nil, err
}
var mapping *meta.RESTMapping
err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
mapping, err = drm.staticMapper.RESTMapping(gk, versions...)
return err
@ -263,7 +268,7 @@ func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...stri
return nil, err
}
var mappings []*meta.RESTMapping
err := drm.checkAndReload(&meta.NoKindMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
mappings, err = drm.staticMapper.RESTMappings(gk, versions...)
return err
@ -276,7 +281,7 @@ func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, err
return "", err
}
var singular string
err := drm.checkAndReload(&meta.NoResourceMatchError{}, func() error {
err := drm.checkAndReload(func() error {
var err error
singular, err = drm.staticMapper.ResourceSingularizer(resource)
return err

View File

@ -18,6 +18,7 @@ package client
import (
"context"
"errors"
"fmt"
"strings"
@ -45,7 +46,7 @@ type WarningHandlerOptions struct {
// AllowDuplicateLogs does not deduplicate the to-be
// logged surfaced warnings messages. See
// log.WarningHandlerOptions for considerations
// regarding deuplication
// regarding deduplication
AllowDuplicateLogs bool
}
@ -88,13 +89,12 @@ func newClient(config *rest.Config, options Options) (*client, error) {
// is log.KubeAPIWarningLogger with deduplication enabled.
// See log.KubeAPIWarningLoggerOptions for considerations
// regarding deduplication.
rest.SetDefaultWarningHandler(
log.NewKubeAPIWarningLogger(
logger,
log.KubeAPIWarningLoggerOptions{
Deduplicate: !options.Opts.AllowDuplicateLogs,
},
),
config = rest.CopyConfig(config)
config.WarningHandler = log.NewKubeAPIWarningLogger(
logger,
log.KubeAPIWarningLoggerOptions{
Deduplicate: !options.Opts.AllowDuplicateLogs,
},
)
}
@ -241,16 +241,16 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat
}
// Get implements client.Client.
func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error {
func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return c.unstructuredClient.Get(ctx, key, obj)
return c.unstructuredClient.Get(ctx, key, obj, opts...)
case *metav1.PartialObjectMetadata:
// Metadata only object should always preserve the GVK coming in from the caller.
defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
return c.metadataClient.Get(ctx, key, obj)
return c.metadataClient.Get(ctx, key, obj, opts...)
default:
return c.typedClient.Get(ctx, key, obj)
return c.typedClient.Get(ctx, key, obj, opts...)
}
}
@ -289,40 +289,194 @@ func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) e
}
// Status implements client.StatusClient.
func (c *client) Status() StatusWriter {
return &statusWriter{client: c}
func (c *client) Status() SubResourceWriter {
return c.SubResource("status")
}
// statusWriter is client.StatusWriter that writes status subresource.
type statusWriter struct {
client *client
func (c *client) SubResource(subResource string) SubResourceClient {
return &subResourceClient{client: c, subResource: subResource}
}
// ensure statusWriter implements client.StatusWriter.
var _ StatusWriter = &statusWriter{}
// subResourceClient is client.SubResourceWriter that writes to subresources.
type subResourceClient struct {
client *client
subResource string
}
// Update implements client.StatusWriter.
func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
// ensure subResourceClient implements client.SubResourceClient.
var _ SubResourceClient = &subResourceClient{}
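Status() is now a thin wrapper over the generic subresource client: it simply returns SubResource("status"), so existing status writers keep working unchanged. A minimal sketch of what that means for callers (illustrative only, not part of the vendored change; assumes c is a client.Client and pod was previously fetched):
// Illustrative sketch, not part of the vendored code.
func updateStatus(ctx context.Context, c client.Client, pod *corev1.Pod) error {
	// c.Status() is shorthand for c.SubResource("status"); the two calls are equivalent.
	return c.Status().Update(ctx, pod)
}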
// SubResourceGetOptions holds all the possible configuration
// for a subresource Get request.
type SubResourceGetOptions struct {
Raw *metav1.GetOptions
}
// ApplyToSubResourceGet updates the configuration on the given get options.
func (getOpt *SubResourceGetOptions) ApplyToSubResourceGet(o *SubResourceGetOptions) {
if getOpt.Raw != nil {
o.Raw = getOpt.Raw
}
}
// ApplyOptions applies the given options.
func (getOpt *SubResourceGetOptions) ApplyOptions(opts []SubResourceGetOption) *SubResourceGetOptions {
for _, o := range opts {
o.ApplyToSubResourceGet(getOpt)
}
return getOpt
}
// AsGetOptions returns the configured options as *metav1.GetOptions.
func (getOpt *SubResourceGetOptions) AsGetOptions() *metav1.GetOptions {
if getOpt.Raw == nil {
return &metav1.GetOptions{}
}
return getOpt.Raw
}
// SubResourceUpdateOptions holds all the possible configuration
// for a subresource update request.
type SubResourceUpdateOptions struct {
UpdateOptions
SubResourceBody Object
}
// ApplyToSubResourceUpdate updates the configuration on the given update options.
func (uo *SubResourceUpdateOptions) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) {
uo.UpdateOptions.ApplyToUpdate(&o.UpdateOptions)
if uo.SubResourceBody != nil {
o.SubResourceBody = uo.SubResourceBody
}
}
// ApplyOptions applies the given options.
func (uo *SubResourceUpdateOptions) ApplyOptions(opts []SubResourceUpdateOption) *SubResourceUpdateOptions {
for _, o := range opts {
o.ApplyToSubResourceUpdate(uo)
}
return uo
}
// SubResourceUpdateAndPatchOption is an option that can be used for either
// a subresource update or patch request.
type SubResourceUpdateAndPatchOption interface {
SubResourceUpdateOption
SubResourcePatchOption
}
// WithSubResourceBody returns an option that uses the given body
// for a subresource Update or Patch operation.
func WithSubResourceBody(body Object) SubResourceUpdateAndPatchOption {
return &withSubresourceBody{body: body}
}
type withSubresourceBody struct {
body Object
}
func (wsr *withSubresourceBody) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) {
o.SubResourceBody = wsr.body
}
func (wsr *withSubresourceBody) ApplyToSubResourcePatch(o *SubResourcePatchOptions) {
o.SubResourceBody = wsr.body
}
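WithSubResourceBody is what lets an update or patch send a body that differs from the parent object, which is how subresources such as scale or token are written. A hedged sketch of the intended call pattern (illustrative only; the names "default"/"example" and the appsv1/autoscalingv1/metav1 imports are assumptions):
// Illustrative sketch, not part of the vendored code.
func scaleDeployment(ctx context.Context, c client.Client) error {
	dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example"}}
	scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 3}}
	// The Deployment identifies the target; the Scale object is the actual request body.
	return c.SubResource("scale").Update(ctx, dep, client.WithSubResourceBody(scale))
}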
// SubResourceCreateOptions are all the possible configurations for a subresource
// create request.
type SubResourceCreateOptions struct {
CreateOptions
}
// ApplyOptions applies the given options.
func (co *SubResourceCreateOptions) ApplyOptions(opts []SubResourceCreateOption) *SubResourceCreateOptions {
for _, o := range opts {
o.ApplyToSubResourceCreate(co)
}
return co
}
// ApplyToSubresourceCreate applies the configuration on the given create options.
func (co *SubResourceCreateOptions) ApplyToSubresourceCreate(o *SubResourceCreateOptions) {
co.CreateOptions.ApplyToCreate(&o.CreateOptions)
}
// SubResourcePatchOptions holds all possible configurations for a subresource patch
// request.
type SubResourcePatchOptions struct {
PatchOptions
SubResourceBody Object
}
// ApplyOptions applies the given options.
func (po *SubResourcePatchOptions) ApplyOptions(opts []SubResourcePatchOption) *SubResourcePatchOptions {
for _, o := range opts {
o.ApplyToSubResourcePatch(po)
}
return po
}
// ApplyToSubResourcePatch applies the configuration on the given patch options.
func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOptions) {
po.PatchOptions.ApplyToPatch(&o.PatchOptions)
if po.SubResourceBody != nil {
o.SubResourceBody = po.SubResourceBody
}
}
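// Get implements client.SubResourceClient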
func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error {
switch obj.(type) {
case *unstructured.Unstructured:
return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...)
case *metav1.PartialObjectMetadata:
return errors.New("can not get subresource using only metadata")
default:
return sc.client.typedClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...)
}
}
// Create implements client.SubResourceClient
func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error {
defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...)
case *metav1.PartialObjectMetadata:
return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
default:
return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
return sc.client.typedClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...)
}
}
// Patch implements client.Client.
func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
// Update implements client.SubResourceClient
func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...)
return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...)
case *metav1.PartialObjectMetadata:
return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...)
return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
default:
return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
return sc.client.typedClient.UpdateSubResource(ctx, obj, sc.subResource, opts...)
}
}
// Patch implements client.SubResourceWriter.
func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
switch obj.(type) {
case *unstructured.Unstructured:
return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...)
case *metav1.PartialObjectMetadata:
return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...)
default:
return sc.client.typedClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...)
}
}

View File

@ -29,15 +29,32 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
)
// KubeconfigFlagName is the name of the kubeconfig flag
const KubeconfigFlagName = "kubeconfig"
var (
kubeconfig string
log = logf.RuntimeLog.WithName("client").WithName("config")
)
// init registers the "kubeconfig" flag to the default command line FlagSet.
// TODO: This should be removed, as it potentially leads to redefined flag errors for users, if they already
// have registered the "kubeconfig" flag to the command line FlagSet in other parts of their code.
func init() {
// TODO: Fix this to allow double vendoring this library but still register flags on behalf of users
flag.StringVar(&kubeconfig, "kubeconfig", "",
"Paths to a kubeconfig. Only required if out-of-cluster.")
RegisterFlags(flag.CommandLine)
}
// RegisterFlags registers flag variables to the given FlagSet if not already registered.
// It uses the default command line FlagSet, if none is provided. Currently, it only registers the kubeconfig flag.
func RegisterFlags(fs *flag.FlagSet) {
if fs == nil {
fs = flag.CommandLine
}
if f := fs.Lookup(KubeconfigFlagName); f != nil {
kubeconfig = f.Value.String()
} else {
fs.StringVar(&kubeconfig, KubeconfigFlagName, "", "Paths to a kubeconfig. Only required if out-of-cluster.")
}
}
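RegisterFlags makes the kubeconfig flag registration reusable on a caller-owned FlagSet and tolerant of an already-defined "kubeconfig" flag. A minimal sketch of the intended usage (illustrative only; assumes the flag, os, and k8s.io/client-go/rest imports, with this package imported as config):
// Illustrative sketch, not part of the vendored code.
func loadRESTConfig() (*rest.Config, error) {
	fs := flag.NewFlagSet("controller", flag.ExitOnError)
	config.RegisterFlags(fs) // registers --kubeconfig only if the FlagSet does not already define it
	_ = fs.Parse(os.Args[1:])
	return config.GetConfig()
}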
// GetConfig creates a *rest.Config for talking to a Kubernetes API server.
@ -47,7 +64,7 @@ func init() {
// It also applies saner defaults for QPS and burst based on the Kubernetes
// controller manager defaults (20 QPS, 30 burst)
//
// Config precedence
// Config precedence:
//
// * --kubeconfig flag pointing at a file
//
@ -67,7 +84,7 @@ func GetConfig() (*rest.Config, error) {
// It also applies saner defaults for QPS and burst based on the Kubernetes
// controller manager defaults (20 QPS, 30 burst)
//
// Config precedence
// Config precedence:
//
// * --kubeconfig flag pointing at a file
//
@ -96,7 +113,7 @@ func GetConfigWithContext(context string) (*rest.Config, error) {
var loadInClusterConfig = rest.InClusterConfig
// loadConfig loads a REST Config as per the rules specified in GetConfig.
func loadConfig(context string) (*rest.Config, error) {
func loadConfig(context string) (config *rest.Config, configErr error) {
// If a flag is specified with the config location, use that
if len(kubeconfig) > 0 {
return loadConfigWithContext("", &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, context)
@ -106,9 +123,16 @@ func loadConfig(context string) (*rest.Config, error) {
// try the in-cluster config.
kubeconfigPath := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
if len(kubeconfigPath) == 0 {
if c, err := loadInClusterConfig(); err == nil {
c, err := loadInClusterConfig()
if err == nil {
return c, nil
}
defer func() {
if configErr != nil {
log.Error(err, "unable to load in-cluster config")
}
}()
}
// If the recommended kubeconfig env variable is set, or there
@ -123,7 +147,7 @@ func loadConfig(context string) (*rest.Config, error) {
if _, ok := os.LookupEnv("HOME"); !ok {
u, err := user.Current()
if err != nil {
return nil, fmt.Errorf("could not get current user: %v", err)
return nil, fmt.Errorf("could not get current user: %w", err)
}
loadingRules.Precedence = append(loadingRules.Precedence, filepath.Join(u.HomeDir, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName))
}

View File

@ -17,7 +17,7 @@ limitations under the License.
// Package client contains functionality for interacting with Kubernetes API
// servers.
//
// Clients
// # Clients
//
// Clients are split into two interfaces -- Readers and Writers. Readers
// get and list, while writers create, update, and delete.
@ -25,18 +25,19 @@ limitations under the License.
// The New function can be used to create a new client that talks directly
// to the API server.
//
// A common pattern in Kubernetes to read from a cache and write to the API
// It is a common pattern in Kubernetes to read from a cache and write to the API
// server. This pattern is covered by the DelegatingClient type, which can
// be used to have a client whose Reader is different from the Writer.
//
// Options
// # Options
//
// Many client operations in Kubernetes support options. These options are
// represented as variadic arguments at the end of a given method call.
// For instance, to use a label selector on list, you can call
// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"})
//
// Indexing
// err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"})
//
// # Indexing
//
// Indexes may be added to caches using a FieldIndexer. This allows you to easily
// and efficiently look up objects with certain properties. You can then make

View File

@ -72,8 +72,8 @@ func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts
}
// Get implements client.Client.
func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
return c.client.Get(ctx, key, obj)
func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
return c.client.Get(ctx, key, obj, opts...)
}
// List implements client.Client.
@ -82,25 +82,38 @@ func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOpt
}
// Status implements client.StatusClient.
func (c *dryRunClient) Status() StatusWriter {
return &dryRunStatusWriter{client: c.client.Status()}
func (c *dryRunClient) Status() SubResourceWriter {
return c.SubResource("status")
}
// ensure dryRunStatusWriter implements client.StatusWriter.
var _ StatusWriter = &dryRunStatusWriter{}
// SubResource implements client.SubResourceClient.
func (c *dryRunClient) SubResource(subResource string) SubResourceClient {
return &dryRunSubResourceClient{client: c.client.SubResource(subResource)}
}
// dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode
// ensure dryRunSubResourceClient implements client.SubResourceWriter.
var _ SubResourceWriter = &dryRunSubResourceClient{}
// dryRunSubResourceClient is client.SubResourceWriter that writes status subresource with dryRun mode
// enforced.
type dryRunStatusWriter struct {
client StatusWriter
type dryRunSubResourceClient struct {
client SubResourceClient
}
// Update implements client.StatusWriter.
func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
func (sw *dryRunSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error {
return sw.client.Get(ctx, obj, subResource, opts...)
}
func (sw *dryRunSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error {
return sw.client.Create(ctx, obj, subResource, append(opts, DryRunAll)...)
}
// Update implements client.SubResourceWriter.
func (sw *dryRunSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
return sw.client.Update(ctx, obj, append(opts, DryRunAll)...)
}
// Patch implements client.StatusWriter.
func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
// Patch implements client.SubResourceWriter.
func (sw *dryRunSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
}

View File

@ -50,7 +50,7 @@ type Reader interface {
// Get retrieves an obj for the given object key from the Kubernetes Cluster.
// obj must be a struct pointer so that obj can be updated with the response
// returned by the Server.
Get(ctx context.Context, key ObjectKey, obj Object) error
Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error
// List retrieves list of objects for a given namespace and list options. On a
// successful call, Items field in the list will be populated with the
@ -60,7 +60,8 @@ type Reader interface {
// Writer knows how to create, delete, and update Kubernetes objects.
type Writer interface {
// Create saves the object obj in the Kubernetes cluster.
// Create saves the object obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Create(ctx context.Context, obj Object, opts ...CreateOption) error
// Delete deletes the given obj from Kubernetes cluster.
@ -81,20 +82,80 @@ type Writer interface {
// StatusClient knows how to create a client which can update status subresource
// for kubernetes objects.
type StatusClient interface {
Status() StatusWriter
Status() SubResourceWriter
}
// StatusWriter knows how to update status subresource of a Kubernetes object.
type StatusWriter interface {
// SubResourceClientConstructor knows how to create a client which can update subresource
// for kubernetes objects.
type SubResourceClientConstructor interface {
// SubResource returns a subresource client for the named subResource. Known
// upstream subResources usages are:
// - ServiceAccount token creation:
// sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
// token := &authenticationv1.TokenRequest{}
// c.SubResourceClient("token").Create(ctx, sa, token)
//
// - Pod eviction creation:
// pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
// c.SubResourceClient("eviction").Create(ctx, pod, &policyv1.Eviction{})
//
// - Pod binding creation:
// pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
// binding := &corev1.Binding{Target: corev1.ObjectReference{Name: "my-node"}}
// c.SubResourceClient("binding").Create(ctx, pod, binding)
//
// - CertificateSigningRequest approval:
// csr := &certificatesv1.CertificateSigningRequest{
// ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
// Status: certificatesv1.CertificateSigningRequestStatus{
// Conditions: []certificatesv1.CertificateSigningRequestCondition{{
// Type: certificatesv1.CertificateApproved,
// Status: corev1.ConditionTrue,
// }},
// },
// }
// c.SubResourceClient("approval").Update(ctx, csr)
//
// - Scale retrieval:
// dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
// scale := &autoscalingv1.Scale{}
// c.SubResourceClient("scale").Get(ctx, dep, scale)
//
// - Scale update:
// dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
// scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}}
// c.SubResourceClient("scale").Update(ctx, dep, client.WithSubResourceBody(scale))
SubResource(subResource string) SubResourceClient
}
// StatusWriter is kept for backward compatibility.
type StatusWriter = SubResourceWriter
// SubResourceReader knows how to read SubResources
type SubResourceReader interface {
Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error
}
// SubResourceWriter knows how to update subresource of a Kubernetes object.
type SubResourceWriter interface {
// Create saves the subResource object in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error
// Update updates the fields corresponding to the status subresource for the
// given obj. obj must be a struct pointer so that obj can be updated
// with the content returned by the Server.
Update(ctx context.Context, obj Object, opts ...UpdateOption) error
Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error
// Patch patches the given object's subresource. obj must be a struct
// pointer so that obj can be updated with the content returned by the
// Server.
Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error
}
// SubResourceClient knows how to perform CRU operations on Kubernetes objects.
type SubResourceClient interface {
SubResourceReader
SubResourceWriter
}
// Client knows how to perform CRUD operations on Kubernetes objects.
@ -102,6 +163,7 @@ type Client interface {
Reader
Writer
StatusClient
SubResourceClientConstructor
// Scheme returns the scheme this client is using.
Scheme() *runtime.Scheme
@ -143,3 +205,13 @@ func IgnoreNotFound(err error) error {
}
return err
}
// IgnoreAlreadyExists returns nil on AlreadyExists errors.
// All other values that are not AlreadyExists errors or nil are returned unmodified.
func IgnoreAlreadyExists(err error) error {
if apierrors.IsAlreadyExists(err) {
return nil
}
return err
}
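IgnoreAlreadyExists mirrors IgnoreNotFound and is mainly useful for idempotent create calls in reconcilers. A hedged sketch (illustrative only; assumes c is a client.Client and cm is the desired *corev1.ConfigMap):
// Illustrative sketch, not part of the vendored code.
func ensureConfigMap(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
	// Creating an object that already exists is treated as success.
	return client.IgnoreAlreadyExists(c.Create(ctx, cm))
}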

View File

@ -116,7 +116,7 @@ func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, op
}
// Get implements client.Client.
func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
@ -124,12 +124,15 @@ func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) er
gvk := metadata.GroupVersionKind()
getOpts := GetOptions{}
getOpts.ApplyOptions(opts)
resInt, err := mc.getResourceInterface(gvk, key.Namespace)
if err != nil {
return err
}
res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{})
res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions())
if err != nil {
return err
}
@ -146,9 +149,7 @@ func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...List
}
gvk := metadata.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
@ -167,7 +168,7 @@ func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...List
return nil
}
func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
func (mc *metadataClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error {
metadata, ok := obj.(*metav1.PartialObjectMetadata)
if !ok {
return fmt.Errorf("metadata client did not understand object: %T", obj)
@ -179,16 +180,24 @@ func (mc *metadataClient) PatchStatus(ctx context.Context, obj Object, patch Pat
return err
}
data, err := patch.Data(obj)
patchOpts := &SubResourcePatchOptions{}
patchOpts.ApplyOptions(opts)
body := obj
if patchOpts.SubResourceBody != nil {
body = patchOpts.SubResourceBody
}
data, err := patch.Data(body)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), "status")
res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), subResource)
if err != nil {
return err
}
*metadata = *res
metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
return nil

View File

@ -52,11 +52,11 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper {
return n.client.RESTMapper()
}
// Create implements clinet.Client.
// Create implements client.Client.
func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -74,7 +74,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -92,7 +92,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat
func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -110,7 +110,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet
func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
if isNamespaceScoped {
@ -123,7 +123,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...
func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -138,18 +138,18 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o
}
// Get implements client.Client.
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
if isNamespaceScoped {
if key.Namespace != "" && key.Namespace != n.namespace {
return fmt.Errorf("namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace)
return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace)
}
key.Namespace = n.namespace
}
return n.client.Get(ctx, key, obj)
return n.client.Get(ctx, key, obj, opts...)
}
// List implements client.Client.
@ -161,25 +161,28 @@ func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...Lis
}
// Status implements client.StatusClient.
func (n *namespacedClient) Status() StatusWriter {
return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n}
func (n *namespacedClient) Status() SubResourceWriter {
return n.SubResource("status")
}
// ensure namespacedClientStatusWriter implements client.StatusWriter.
var _ StatusWriter = &namespacedClientStatusWriter{}
// SubResource implements client.SubResourceClient.
func (n *namespacedClient) SubResource(subResource string) SubResourceClient {
return &namespacedClientSubResourceClient{client: n.client.SubResource(subResource), namespace: n.namespace, namespacedclient: n}
}
type namespacedClientStatusWriter struct {
StatusClient StatusWriter
// ensure namespacedClientSubResourceClient implements client.SubResourceClient.
var _ SubResourceClient = &namespacedClientSubResourceClient{}
type namespacedClientSubResourceClient struct {
client SubResourceClient
namespace string
namespacedclient Client
}
// Update implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -190,15 +193,14 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object,
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.StatusClient.Update(ctx, obj, opts...)
return nsw.client.Get(ctx, obj, subResource, opts...)
}
// Patch implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %v", err)
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
@ -209,5 +211,43 @@ func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object,
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.StatusClient.Patch(ctx, obj, patch, opts...)
return nsw.client.Create(ctx, obj, subResource, opts...)
}
// Update implements client.SubResourceWriter.
func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != nsw.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.client.Update(ctx, obj, opts...)
}
// Patch implements client.SubResourceWriter.
func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper())
if err != nil {
return fmt.Errorf("error finding the scope of the object: %w", err)
}
objectNamespace := obj.GetNamespace()
if objectNamespace != nsw.namespace && objectNamespace != "" {
return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace)
}
if isNamespaceScoped && objectNamespace == "" {
obj.SetNamespace(nsw.namespace)
}
return nsw.client.Patch(ctx, obj, patch, opts...)
}

View File

@ -37,6 +37,12 @@ type DeleteOption interface {
ApplyToDelete(*DeleteOptions)
}
// GetOption is some configuration that modifies options for a get request.
type GetOption interface {
// ApplyToGet applies this configuration to the given get options.
ApplyToGet(*GetOptions)
}
// ListOption is some configuration that modifies options for a list request.
type ListOption interface {
// ApplyToList applies this configuration to the given list options.
@ -61,6 +67,29 @@ type DeleteAllOfOption interface {
ApplyToDeleteAllOf(*DeleteAllOfOptions)
}
// SubResourceGetOption modifies options for a SubResource Get request.
type SubResourceGetOption interface {
ApplyToSubResourceGet(*SubResourceGetOptions)
}
// SubResourceUpdateOption is some configuration that modifies options for a update request.
type SubResourceUpdateOption interface {
// ApplyToSubResourceUpdate applies this configuration to the given update options.
ApplyToSubResourceUpdate(*SubResourceUpdateOptions)
}
// SubResourceCreateOption is some configuration that modifies options for a create request.
type SubResourceCreateOption interface {
// ApplyToSubResourceCreate applies this configuration to the given create options.
ApplyToSubResourceCreate(*SubResourceCreateOptions)
}
// SubResourcePatchOption configures a subresource patch request.
type SubResourcePatchOption interface {
// ApplyToSubResourcePatch applies the configuration on the given patch options.
ApplyToSubResourcePatch(*SubResourcePatchOptions)
}
// }}}
// {{{ Multi-Type Options
@ -90,10 +119,23 @@ func (dryRunAll) ApplyToPatch(opts *PatchOptions) {
func (dryRunAll) ApplyToDelete(opts *DeleteOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
func (dryRunAll) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
func (dryRunAll) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
func (dryRunAll) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
// FieldOwner set the field manager name for the given server-side apply patch.
type FieldOwner string
@ -311,6 +353,45 @@ func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
// }}}
// {{{ Get Options
// GetOptions contains options for get operation.
// Now it only has a Raw field, with support for specific resourceVersion.
type GetOptions struct {
// Raw represents raw GetOptions, as passed to the API server. Note
// that these may not be respected by all implementations of interface.
Raw *metav1.GetOptions
}
var _ GetOption = &GetOptions{}
// ApplyToGet implements GetOption for GetOptions.
func (o *GetOptions) ApplyToGet(lo *GetOptions) {
if o.Raw != nil {
lo.Raw = o.Raw
}
}
// AsGetOptions returns these options as a flattened metav1.GetOptions.
// This may mutate the Raw field.
func (o *GetOptions) AsGetOptions() *metav1.GetOptions {
if o == nil || o.Raw == nil {
return &metav1.GetOptions{}
}
return o.Raw
}
// ApplyOptions applies the given get options on these options,
// and then returns itself (for convenient chaining).
func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions {
for _, opt := range opts {
opt.ApplyToGet(o)
}
return o
}
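GetOptions is what flows through the new variadic GetOption parameter on Reader.Get, carrying a raw metav1.GetOptions through to the API server. A minimal sketch of a Get that passes a resourceVersion hint (illustrative only; the "default"/"example" key is made up):
// Illustrative sketch, not part of the vendored code.
func getPod(ctx context.Context, c client.Client) (*corev1.Pod, error) {
	pod := &corev1.Pod{}
	err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "example"}, pod,
		&client.GetOptions{Raw: &metav1.GetOptions{ResourceVersion: "0"}})
	return pod, err
}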
// }}}
// {{{ List Options
// ListOptions contains options for limiting or filtering results.
@ -318,7 +399,7 @@ func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
// pre-parsed selectors (since generally, selectors will be executed
// against the cache).
type ListOptions struct {
// LabelSelector filters results by label. Use SetLabelSelector to
// LabelSelector filters results by label. Use labels.Parse() to
// set from raw string form.
LabelSelector labels.Selector
// FieldSelector filters results by a particular field. In order
@ -341,6 +422,12 @@ type ListOptions struct {
// it has expired. This field is not supported if watch is true in the Raw ListOptions.
Continue string
// UnsafeDisableDeepCopy indicates not to deep copy objects during list objects.
// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
// otherwise you will mutate the object in the cache.
// +optional
UnsafeDisableDeepCopy *bool
// Raw represents raw ListOptions, as passed to the API server. Note
// that these may not be respected by all implementations of interface,
// and the LabelSelector, FieldSelector, Limit and Continue fields are ignored.
@ -369,6 +456,9 @@ func (o *ListOptions) ApplyToList(lo *ListOptions) {
if o.Continue != "" {
lo.Continue = o.Continue
}
if o.UnsafeDisableDeepCopy != nil {
lo.UnsafeDisableDeepCopy = o.UnsafeDisableDeepCopy
}
}
// AsListOptions returns these options as a flattened metav1.ListOptions.
@ -511,6 +601,25 @@ func (l Limit) ApplyToList(opts *ListOptions) {
opts.Limit = int64(l)
}
// UnsafeDisableDeepCopyOption indicates not to deep copy objects during list objects.
// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
// otherwise you will mutate the object in the cache.
type UnsafeDisableDeepCopyOption bool
// ApplyToList applies this configuration to the given list options.
func (d UnsafeDisableDeepCopyOption) ApplyToList(opts *ListOptions) {
definitelyTrue := true
definitelyFalse := false
if d {
opts.UnsafeDisableDeepCopy = &definitelyTrue
} else {
opts.UnsafeDisableDeepCopy = &definitelyFalse
}
}
// UnsafeDisableDeepCopy indicates not to deep copy objects during list objects.
const UnsafeDisableDeepCopy = UnsafeDisableDeepCopyOption(true)
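UnsafeDisableDeepCopy is a list-only escape hatch: when the list is served from the cache, the per-object deep copy is skipped, so the results must be treated as read-only. A hedged sketch (illustrative only; assumes c reads from an informer-backed cache):
// Illustrative sketch, not part of the vendored code.
func countPods(ctx context.Context, c client.Client) (int, error) {
	pods := &corev1.PodList{}
	// Read-only use only: the returned items may point straight into the cache.
	if err := c.List(ctx, pods, client.InNamespace("default"), client.UnsafeDisableDeepCopy); err != nil {
		return 0, err
	}
	return len(pods.Items), nil
}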
// Continue sets a continuation token to retrieve chunks of results when using limit.
// Continue does not implement DeleteAllOfOption interface because the server
// does not support setting it for deletecollection operations.

View File

@ -19,7 +19,7 @@ package client
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
jsonpatch "github.com/evanphx/json-patch/v5"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/strategicpatch"

View File

@ -61,8 +61,9 @@ func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) {
uncachedGVKs: uncachedGVKs,
cacheUnstructured: in.CacheUnstructured,
},
Writer: in.Client,
StatusClient: in.Client,
Writer: in.Client,
StatusClient: in.Client,
SubResourceClientConstructor: in.Client,
}, nil
}
@ -70,6 +71,7 @@ type delegatingClient struct {
Reader
Writer
StatusClient
SubResourceClientConstructor
scheme *runtime.Scheme
mapper meta.RESTMapper
@ -121,13 +123,13 @@ func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
}
// Get retrieves an obj for a given object key from the Kubernetes Cluster.
func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object) error {
func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
if isUncached, err := d.shouldBypassCache(obj); err != nil {
return err
} else if isUncached {
return d.ClientReader.Get(ctx, key, obj)
return d.ClientReader.Get(ctx, key, obj, opts...)
}
return d.CacheReader.Get(ctx, key, obj)
return d.CacheReader.Get(ctx, key, obj, opts...)
}
// List retrieves list of objects for a given namespace and list options.

View File

@ -24,7 +24,6 @@ import (
var _ Reader = &typedClient{}
var _ Writer = &typedClient{}
var _ StatusWriter = &typedClient{}
// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
// new clients at the time they are used, and caches the client.
@ -42,6 +41,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti
createOpts := &CreateOptions{}
createOpts.ApplyOptions(opts)
return o.Post().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
@ -60,6 +60,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti
updateOpts := &UpdateOptions{}
updateOpts.ApplyOptions(opts)
return o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
@ -121,25 +122,30 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts .
}
patchOpts := &PatchOptions{}
patchOpts.ApplyOptions(opts)
return o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec).
Body(data).
Do(ctx).
Into(obj)
}
// Get implements client.Client.
func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
r, err := c.cache.getResource(obj)
if err != nil {
return err
}
getOpts := GetOptions{}
getOpts.ApplyOptions(opts)
return r.Get().
NamespaceIfScoped(key.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(getOpts.AsGetOptions(), c.paramCodec).
Name(key.Name).Do(ctx).Into(obj)
}
@ -149,8 +155,10 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti
if err != nil {
return err
}
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
return r.Get().
NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
Resource(r.resource()).
@ -159,8 +167,55 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti
Into(obj)
}
// UpdateStatus used by StatusWriter to write status.
func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
if subResourceObj.GetName() == "" {
subResourceObj.SetName(obj.GetName())
}
getOpts := &SubResourceGetOptions{}
getOpts.ApplyOptions(opts)
return o.Get().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource(subResource).
VersionedParams(getOpts.AsGetOptions(), c.paramCodec).
Do(ctx).
Into(subResourceObj)
}
func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
if subResourceObj.GetName() == "" {
subResourceObj.SetName(obj.GetName())
}
createOpts := &SubResourceCreateOptions{}
createOpts.ApplyOptions(opts)
return o.Post().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource(subResource).
Body(subResourceObj).
VersionedParams(createOpts.AsCreateOptions(), c.paramCodec).
Do(ctx).
Into(subResourceObj)
}
// UpdateSubResource used by SubResourceWriter to write status.
func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
@ -169,37 +224,58 @@ func (c *typedClient) UpdateStatus(ctx context.Context, obj Object, opts ...Upda
// wrapped to improve the UX ?
// It will be nice to receive an error saying the object doesn't implement
// status subresource and check CRD definition
updateOpts := &SubResourceUpdateOptions{}
updateOpts.ApplyOptions(opts)
body := obj
if updateOpts.SubResourceBody != nil {
body = updateOpts.SubResourceBody
}
if body.GetName() == "" {
body.SetName(obj.GetName())
}
if body.GetNamespace() == "" {
body.SetNamespace(obj.GetNamespace())
}
return o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
Body(obj).
VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), c.paramCodec).
SubResource(subResource).
Body(body).
VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec).
Do(ctx).
Into(obj)
Into(body)
}
// PatchStatus used by StatusWriter to write status.
func (c *typedClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
// PatchSubResource used by SubResourceWriter to write subresource.
func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error {
o, err := c.cache.getObjMeta(obj)
if err != nil {
return err
}
data, err := patch.Data(obj)
patchOpts := &SubResourcePatchOptions{}
patchOpts.ApplyOptions(opts)
body := obj
if patchOpts.SubResourceBody != nil {
body = patchOpts.SubResourceBody
}
data, err := patch.Data(body)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
return o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
SubResource(subResource).
Body(data).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), c.paramCodec).
VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec).
Do(ctx).
Into(obj)
Into(body)
}

View File

@ -27,7 +27,6 @@ import (
var _ Reader = &unstructuredClient{}
var _ Writer = &unstructuredClient{}
var _ StatusWriter = &unstructuredClient{}
// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
// new clients at the time they are used, and caches the client.
@ -52,6 +51,7 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr
createOpts := &CreateOptions{}
createOpts.ApplyOptions(opts)
result := o.Post().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
@ -80,6 +80,7 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up
updateOpts := UpdateOptions{}
updateOpts.ApplyOptions(opts)
result := o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
@ -95,8 +96,7 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up
// Delete implements client.Client.
func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -107,6 +107,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De
deleteOpts := DeleteOptions{}
deleteOpts.ApplyOptions(opts)
return o.Delete().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
@ -118,8 +119,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De
// DeleteAllOf implements client.Client.
func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -130,6 +130,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts
deleteAllOfOpts := DeleteAllOfOptions{}
deleteAllOfOpts.ApplyOptions(opts)
return o.Delete().
NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()).
Resource(o.resource()).
@ -141,8 +142,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts
// Patch implements client.Client.
func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -157,18 +157,20 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch
}
patchOpts := &PatchOptions{}
patchOpts.ApplyOptions(opts)
return o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec).
VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec).
Body(data).
Do(ctx).
Into(obj)
}
// Get implements client.Client.
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error {
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
u, ok := obj.(*unstructured.Unstructured)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -176,6 +178,9 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object
gvk := u.GroupVersionKind()
getOpts := GetOptions{}
getOpts.ApplyOptions(opts)
r, err := uc.cache.getResource(obj)
if err != nil {
return err
@ -184,6 +189,7 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object
result := r.Get().
NamespaceIfScoped(key.Namespace, r.isNamespaced()).
Resource(r.resource()).
VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
Name(key.Name).
Do(ctx).
Into(obj)
@ -201,18 +207,16 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...
}
gvk := u.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
r, err := uc.cache.getResource(obj)
if err != nil {
return err
}
listOpts := ListOptions{}
listOpts.ApplyOptions(opts)
return r.Get().
NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
Resource(r.resource()).
@ -221,9 +225,71 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...
Into(obj)
}
func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts ...UpdateOption) error {
_, ok := obj.(*unstructured.Unstructured)
if !ok {
func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", subResource)
}
if _, ok := subResourceObj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
if subResourceObj.GetName() == "" {
subResourceObj.SetName(obj.GetName())
}
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
getOpts := &SubResourceGetOptions{}
getOpts.ApplyOptions(opts)
return o.Get().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource(subResource).
VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
Do(ctx).
Into(subResourceObj)
}
func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj)
}
if _, ok := subResourceObj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
if subResourceObj.GetName() == "" {
subResourceObj.SetName(obj.GetName())
}
o, err := uc.cache.getObjMeta(obj)
if err != nil {
return err
}
createOpts := &SubResourceCreateOptions{}
createOpts.ApplyOptions(opts)
return o.Post().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource(subResource).
Body(subResourceObj).
VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec).
Do(ctx).
Into(subResourceObj)
}
func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error {
if _, ok := obj.(*unstructured.Unstructured); !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
}
@ -232,18 +298,32 @@ func (uc *unstructuredClient) UpdateStatus(ctx context.Context, obj Object, opts
return err
}
updateOpts := SubResourceUpdateOptions{}
updateOpts.ApplyOptions(opts)
body := obj
if updateOpts.SubResourceBody != nil {
body = updateOpts.SubResourceBody
}
if body.GetName() == "" {
body.SetName(obj.GetName())
}
if body.GetNamespace() == "" {
body.SetNamespace(obj.GetNamespace())
}
return o.Put().
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
Body(obj).
VersionedParams((&UpdateOptions{}).ApplyOptions(opts).AsUpdateOptions(), uc.paramCodec).
SubResource(subResource).
Body(body).
VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec).
Do(ctx).
Into(obj)
Into(body)
}
func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error {
u, ok := obj.(*unstructured.Unstructured)
if !ok {
return fmt.Errorf("unstructured client did not understand object: %T", obj)
@ -256,21 +336,28 @@ func (uc *unstructuredClient) PatchStatus(ctx context.Context, obj Object, patch
return err
}
data, err := patch.Data(obj)
patchOpts := &SubResourcePatchOptions{}
patchOpts.ApplyOptions(opts)
body := obj
if patchOpts.SubResourceBody != nil {
body = patchOpts.SubResourceBody
}
data, err := patch.Data(body)
if err != nil {
return err
}
patchOpts := &PatchOptions{}
result := o.Patch(patch.Type()).
NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
Resource(o.resource()).
Name(o.GetName()).
SubResource("status").
SubResource(subResource).
Body(data).
VersionedParams(patchOpts.ApplyOptions(opts).AsPatchOptions(), uc.paramCodec).
VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec).
Do(ctx).
Into(u)
Into(body)
u.SetGroupVersionKind(gvk)
return result

View File

@ -69,9 +69,7 @@ func (w *watchingClient) listOpts(opts ...ListOption) ListOptions {
func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) {
gvk := obj.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
listOpts := w.listOpts(opts...)
@ -85,9 +83,7 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO
func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) {
gvk := obj.GroupVersionKind()
if strings.HasSuffix(gvk.Kind, "List") {
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
r, err := w.client.unstructuredClient.cache.getResource(obj)
if err != nil {

View File

@ -112,6 +112,7 @@ type Options struct {
// NewClient is the func that creates the client to be used by the manager.
// If not set this will create the default DelegatingClient that will
// use the cache for reads and the client for writes.
// NOTE: The default client will not cache Unstructured.
NewClient NewClientFunc
// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
@ -255,16 +256,33 @@ func setOptionsDefaults(options Options) Options {
// NewClientFunc allows a user to define how to create a client.
type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error)
// DefaultNewClient creates the default caching client.
func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
c, err := client.New(config, options)
if err != nil {
return nil, err
}
return client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cache,
Client: c,
UncachedObjects: uncachedObjects,
})
// ClientOptions are the optional arguments for tuning the caching client.
type ClientOptions struct {
UncachedObjects []client.Object
CacheUnstructured bool
}
// DefaultNewClient creates the default caching client, that will never cache Unstructured.
func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
return ClientBuilderWithOptions(ClientOptions{})(cache, config, options, uncachedObjects...)
}
// ClientBuilderWithOptions returns a Client constructor that will build a client
// honoring the options argument
func ClientBuilderWithOptions(options ClientOptions) NewClientFunc {
return func(cache cache.Cache, config *rest.Config, clientOpts client.Options, uncachedObjects ...client.Object) (client.Client, error) {
options.UncachedObjects = append(options.UncachedObjects, uncachedObjects...)
c, err := client.New(config, clientOpts)
if err != nil {
return nil, err
}
return client.NewDelegatingClient(client.NewDelegatingClientInput{
CacheReader: cache,
Client: c,
UncachedObjects: options.UncachedObjects,
CacheUnstructured: options.CacheUnstructured,
})
}
}
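ClientBuilderWithOptions exposes the caching behaviour (uncached types, caching Unstructured) as a NewClientFunc. A hedged sketch of wiring it into a manager, assuming these helpers live in sigs.k8s.io/controller-runtime/pkg/cluster and are imported as cluster (illustrative only; cfg is a *rest.Config):
// Illustrative sketch, not part of the vendored code.
func newManager(cfg *rest.Config) (manager.Manager, error) {
	return manager.New(cfg, manager.Options{
		NewClient: cluster.ClientBuilderWithOptions(cluster.ClientOptions{
			CacheUnstructured: true,                                  // also serve unstructured reads from the cache
			UncachedObjects:   []client.Object{&corev1.ConfigMap{}}, // always read these straight from the API server
		}),
	})
}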

View File

@ -18,7 +18,7 @@ package config
import (
"fmt"
ioutil "io/ioutil"
"os"
"sync"
"k8s.io/apimachinery/pkg/runtime"
@ -50,8 +50,8 @@ type DeferredFileLoader struct {
// this will also configure the defaults for the loader if nothing is
//
// Defaults:
// Path: "./config.yaml"
// Kind: GenericControllerManagerConfiguration
// * Path: "./config.yaml"
// * Kind: GenericControllerManagerConfiguration
func File() *DeferredFileLoader {
scheme := runtime.NewScheme()
utilruntime.Must(v1alpha1.AddToScheme(scheme))
@ -96,7 +96,7 @@ func (d *DeferredFileLoader) loadFile() {
return
}
content, err := ioutil.ReadFile(d.path)
content, err := os.ReadFile(d.path)
if err != nil {
d.err = fmt.Errorf("could not read file at %s", d.path)
return

View File

@ -17,7 +17,7 @@ limitations under the License.
// Package config contains functionality for interacting with ComponentConfig
// files
//
// DeferredFileLoader
// # DeferredFileLoader
//
// This uses a deferred file decoding allowing you to chain your configuration
// setup. You can pass this into manager.Options#File and it will load your

View File

@ -94,6 +94,10 @@ type ControllerConfigurationSpec struct {
// Defaults to 2 minutes if not set.
// +optional
CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"`
// RecoverPanic indicates if panics should be recovered.
// +optional
RecoverPanic *bool `json:"recoverPanic,omitempty"`
}
// ControllerMetrics defines the metrics configs.
@ -109,6 +113,7 @@ type ControllerMetrics struct {
type ControllerHealth struct {
// HealthProbeBindAddress is the TCP address that the controller should bind to
// for serving health probes
// It can be set to "0" or "" to disable serving the health probe.
// +optional
HealthProbeBindAddress string `json:"healthProbeBindAddress,omitempty"`

View File

@ -27,6 +27,11 @@ func (in *ControllerConfigurationSpec) DeepCopyInto(out *ControllerConfiguration
*out = new(timex.Duration)
**out = **in
}
if in.RecoverPanic != nil {
in, out := &in.RecoverPanic, &out.RecoverPanic
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfigurationSpec.

View File

@ -23,6 +23,8 @@ import (
"github.com/go-logr/logr"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/internal/controller"
"sigs.k8s.io/controller-runtime/pkg/manager"
@ -45,16 +47,17 @@ type Options struct {
// The overall is a token bucket and the per-item is exponential.
RateLimiter ratelimiter.RateLimiter
// Log is the logger used for this controller and passed to each reconciliation
// request via the context field.
Log logr.Logger
// LogConstructor is used to construct a logger used for this controller and passed
// to each reconciliation via the context field.
LogConstructor func(request *reconcile.Request) logr.Logger
// CacheSyncTimeout refers to the time limit set to wait for syncing caches.
// Defaults to 2 minutes if not set.
CacheSyncTimeout time.Duration
// RecoverPanic indicates whether the panic caused by reconcile should be recovered.
RecoverPanic bool
// Defaults to the Controller.RecoverPanic setting from the Manager if unset.
RecoverPanic *bool
}
// Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests
@ -104,8 +107,20 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
return nil, fmt.Errorf("must specify Name for Controller")
}
if options.Log.GetSink() == nil {
options.Log = mgr.GetLogger()
if options.LogConstructor == nil {
log := mgr.GetLogger().WithValues(
"controller", name,
)
options.LogConstructor = func(req *reconcile.Request) logr.Logger {
log := log
if req != nil {
log = log.WithValues(
"object", klog.KRef(req.Namespace, req.Name),
"namespace", req.Namespace, "name", req.Name,
)
}
return log
}
}
if options.MaxConcurrentReconciles <= 0 {
@ -125,6 +140,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
return nil, err
}
if options.RecoverPanic == nil {
options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic
}
// Create controller with dependencies set
return &controller.Controller{
Do: options.Reconciler,
@ -135,7 +154,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller
CacheSyncTimeout: options.CacheSyncTimeout,
SetFields: mgr.SetFields,
Name: name,
Log: options.Log.WithName("controller").WithName(name),
LogConstructor: options.LogConstructor,
RecoverPanic: options.RecoverPanic,
}, nil
}
// ReconcileIDFromContext gets the reconcileID from the current context.
var ReconcileIDFromContext = controller.ReconcileIDFromContext
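A hedged sketch of the reworked Options in use; the package name, controller name, and label values are illustrative. Because RecoverPanic is now a *bool, leaving it nil inherits the Manager-wide Controller.RecoverPanic default, while setting it overrides that default:

package example

import (
	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// addController registers a controller that opts into panic recovery and
// supplies its own per-request logger constructor.
func addController(mgr ctrl.Manager, r reconcile.Reconciler) (controller.Controller, error) {
	recoverPanic := true
	return controller.New("example", mgr, controller.Options{
		Reconciler:   r,
		RecoverPanic: &recoverPanic, // nil would inherit the Manager's Controller.RecoverPanic
		LogConstructor: func(req *reconcile.Request) logr.Logger {
			log := mgr.GetLogger().WithValues("controller", "example")
			if req != nil {
				log = log.WithValues("namespace", req.Namespace, "name", req.Name)
			}
			return log
		},
	})
}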

View File

@ -17,7 +17,7 @@ limitations under the License.
/*
Package controller provides types and functions for building Controllers. Controllers implement Kubernetes APIs.
Creation
# Creation
To create a new Controller, first create a manager.Manager and pass it to the controller.New function.
The Controller MUST be started by calling Manager.Start.

View File

@ -21,7 +21,7 @@ Controller.Watch in order to generate and enqueue reconcile.Request work items.
Generally, following premade event handlers should be sufficient for most use cases:
EventHandlers
EventHandlers:
EnqueueRequestForObject - Enqueues a reconcile.Request containing the Name and Namespace of the object in the Event. This will
cause the object that was the source of the Event (e.g. the created / deleted / updated object) to be

View File

@ -70,7 +70,7 @@ func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) {
parts = append(parts, checkStatus{name: "ping", healthy: true})
}
for _, c := range excluded.List() {
for _, c := range excluded.UnsortedList() {
log.V(1).Info("cannot exclude health check, no matches for it", "checker", c)
}
@ -88,7 +88,7 @@ func (h *Handler) serveAggregated(resp http.ResponseWriter, req *http.Request) {
// any checks that the user requested to have excluded, but weren't actually
// known checks. writeStatusAsText is always verbose on failure, and can be
// forced to be verbose on success using the given argument.
func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.String, failed, forceVerbose bool) {
func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownExcludes sets.Set[string], failed, forceVerbose bool) {
resp.Header().Set("Content-Type", "text/plain; charset=utf-8")
resp.Header().Set("X-Content-Type-Options", "nosniff")
@ -121,7 +121,7 @@ func writeStatusesAsText(resp http.ResponseWriter, parts []checkStatus, unknownE
}
if unknownExcludes.Len() > 0 {
fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.List()...))
fmt.Fprintf(resp, "warn: some health checks cannot be excluded: no matches for %s\n", formatQuoted(unknownExcludes.UnsortedList()...))
}
if failed {
@ -187,12 +187,12 @@ type Checker func(req *http.Request) error
var Ping Checker = func(_ *http.Request) error { return nil }
// getExcludedChecks extracts the health check names to be excluded from the query param.
func getExcludedChecks(r *http.Request) sets.String {
func getExcludedChecks(r *http.Request) sets.Set[string] {
checks, found := r.URL.Query()["exclude"]
if found {
return sets.NewString(checks...)
return sets.New[string](checks...)
}
return sets.NewString()
return sets.New[string]()
}
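As a side note, a small sketch of the generic sets API this hunk migrates to (k8s.io/apimachinery/pkg/util/sets in Kubernetes 1.26); the check names are made up:

package example

import "k8s.io/apimachinery/pkg/util/sets"

// excludedChecks mirrors the migration above: sets.Set[string] replaces
// sets.String, sets.New[string] replaces sets.NewString, and UnsortedList
// replaces List where sorted output was never relied upon.
func excludedChecks(names ...string) []string {
	s := sets.New[string](names...)
	s.Insert("ping")
	return s.UnsortedList()
}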
// formatQuoted returns a formatted string of the health check names,

View File

@ -24,7 +24,9 @@ import (
"time"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/handler"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
@ -83,11 +85,14 @@ type Controller struct {
// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
startWatches []watchDescription
// Log is used to log messages to users during reconciliation, or for example when a watch is started.
Log logr.Logger
// LogConstructor is used to construct a logger to then log messages to users during reconciliation,
// or for example when a watch is started.
// Note: LogConstructor has to be able to handle nil requests as we are also using it
// outside the context of a reconciliation.
LogConstructor func(request *reconcile.Request) logr.Logger
// RecoverPanic indicates whether the panic caused by reconcile should be recovered.
RecoverPanic bool
RecoverPanic *bool
}
// watchDescription contains all the information necessary to start a watch.
@ -99,18 +104,21 @@ type watchDescription struct {
// Reconcile implements reconcile.Reconciler.
func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
if c.RecoverPanic {
defer func() {
if r := recover(); r != nil {
defer func() {
if r := recover(); r != nil {
if c.RecoverPanic != nil && *c.RecoverPanic {
for _, fn := range utilruntime.PanicHandlers {
fn(r)
}
err = fmt.Errorf("panic: %v [recovered]", r)
return
}
}()
}
log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
ctx = logf.IntoContext(ctx, log)
log := logf.FromContext(ctx)
log.Info(fmt.Sprintf("Observed a panic in reconciler: %v", r))
panic(r)
}
}()
return c.Do.Reconcile(ctx, req)
}
@ -140,7 +148,7 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc
return nil
}
c.Log.Info("Starting EventSource", "source", src)
c.LogConstructor(nil).Info("Starting EventSource", "source", src)
return src.Start(c.ctx, evthdler, c.Queue, prct...)
}
@ -175,7 +183,7 @@ func (c *Controller) Start(ctx context.Context) error {
// caches to sync so that they have a chance to register their intended
// caches.
for _, watch := range c.startWatches {
c.Log.Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
c.LogConstructor(nil).Info("Starting EventSource", "source", fmt.Sprintf("%s", watch.src))
if err := watch.src.Start(ctx, watch.handler, c.Queue, watch.predicates...); err != nil {
return err
@ -183,7 +191,7 @@ func (c *Controller) Start(ctx context.Context) error {
}
// Start the SharedIndexInformer factories to begin populating the SharedIndexInformer caches
c.Log.Info("Starting Controller")
c.LogConstructor(nil).Info("Starting Controller")
for _, watch := range c.startWatches {
syncingSource, ok := watch.src.(source.SyncingSource)
@ -200,7 +208,7 @@ func (c *Controller) Start(ctx context.Context) error {
// is an error or a timeout
if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
err := fmt.Errorf("failed to wait for %s caches to sync: %w", c.Name, err)
c.Log.Error(err, "Could not wait for Cache to sync")
c.LogConstructor(nil).Error(err, "Could not wait for Cache to sync")
return err
}
@ -217,7 +225,7 @@ func (c *Controller) Start(ctx context.Context) error {
c.startWatches = nil
// Launch workers to process resources
c.Log.Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
wg.Add(c.MaxConcurrentReconciles)
for i := 0; i < c.MaxConcurrentReconciles; i++ {
go func() {
@ -237,9 +245,9 @@ func (c *Controller) Start(ctx context.Context) error {
}
<-ctx.Done()
c.Log.Info("Shutdown signal received, waiting for all workers to finish")
c.LogConstructor(nil).Info("Shutdown signal received, waiting for all workers to finish")
wg.Wait()
c.Log.Info("All workers finished")
c.LogConstructor(nil).Info("All workers finished")
return nil
}
@ -291,20 +299,24 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
c.updateMetrics(time.Since(reconcileStartTS))
}()
// Make sure that the the object is a valid request.
// Make sure that the object is a valid request.
req, ok := obj.(reconcile.Request)
if !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.Queue.Forget(obj)
c.Log.Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
c.LogConstructor(nil).Error(nil, "Queue item was not a Request", "type", fmt.Sprintf("%T", obj), "value", obj)
// Return true, don't take a break
return
}
log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace)
log := c.LogConstructor(&req)
reconcileID := uuid.NewUUID()
log = log.WithValues("reconcileID", reconcileID)
ctx = logf.IntoContext(ctx, log)
ctx = addReconcileID(ctx, reconcileID)
// RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the
// resource to be synced.
@ -336,7 +348,7 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) {
// GetLogger returns this controller's logger.
func (c *Controller) GetLogger() logr.Logger {
return c.Log
return c.LogConstructor(nil)
}
// InjectFunc implement SetFields.Injector.
@ -349,3 +361,21 @@ func (c *Controller) InjectFunc(f inject.Func) error {
func (c *Controller) updateMetrics(reconcileTime time.Duration) {
ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds())
}
// ReconcileIDFromContext gets the reconcileID from the current context.
func ReconcileIDFromContext(ctx context.Context) types.UID {
r, ok := ctx.Value(reconcileIDKey{}).(types.UID)
if !ok {
return ""
}
return r
}
// reconcileIDKey is a context.Context Value key. Its associated value should
// be a types.UID.
type reconcileIDKey struct{}
func addReconcileID(ctx context.Context, reconcileID types.UID) context.Context {
return context.WithValue(ctx, reconcileIDKey{}, reconcileID)
}
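A hedged sketch of how a reconciler might surface the new reconcileID; the reconciler type is illustrative, and the public accessor is assumed to be the pkg/controller re-export shown earlier:

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

type exampleReconciler struct{}

// Reconcile reads the per-attempt reconcileID either implicitly, via the
// context-injected logger, or explicitly, via ReconcileIDFromContext.
func (exampleReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	log := logf.FromContext(ctx) // already carries the "reconcileID" key
	log.Info("reconciling", "reconcileID", controller.ReconcileIDFromContext(ctx))
	return reconcile.Result{}, nil
}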

View File

@ -0,0 +1,35 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package selector
import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/selection"
)
// RequiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`.
func RequiresExactMatch(sel fields.Selector) (field, val string, required bool) {
reqs := sel.Requirements()
if len(reqs) != 1 {
return "", "", false
}
req := reqs[0]
if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals {
return "", "", false
}
return req.Field, req.Value, true
}
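A small same-package sketch of the helper's behaviour; the field names and values are illustrative:

package selector

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

// ExampleRequiresExactMatch shows which selectors qualify as exact matches.
func ExampleRequiresExactMatch() {
	field, val, ok := RequiresExactMatch(fields.ParseSelectorOrDie("spec.nodeName==node-1"))
	fmt.Println(field, val, ok) // spec.nodeName node-1 true

	_, _, ok = RequiresExactMatch(fields.ParseSelectorOrDie("metadata.name!=foo"))
	fmt.Println(ok) // false: only = and == qualify
}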

View File

@ -100,7 +100,7 @@ func (p *Provider) getBroadcaster() record.EventBroadcaster {
broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: p.evtClient})
broadcaster.StartEventWatcher(
func(e *corev1.Event) {
p.logger.V(1).Info(e.Type, "object", e.InvolvedObject, "reason", e.Reason, "message", e.Message)
p.logger.V(1).Info(e.Message, "type", e.Type, "object", e.InvolvedObject, "reason", e.Reason)
})
p.broadcaster = broadcaster
p.stopBroadcaster = stop

View File

@ -19,7 +19,6 @@ package leaderelection
import (
"errors"
"fmt"
"io/ioutil"
"os"
"k8s.io/apimachinery/pkg/util/uuid"
@ -27,6 +26,7 @@ import (
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"sigs.k8s.io/controller-runtime/pkg/recorder"
)
@ -39,7 +39,7 @@ type Options struct {
LeaderElection bool
// LeaderElectionResourceLock determines which resource lock to use for leader election,
// defaults to "configmapsleases".
// defaults to "leases".
LeaderElectionResourceLock string
// LeaderElectionNamespace determines the namespace in which the leader
@ -57,11 +57,12 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
return nil, nil
}
// Default resource lock to "configmapsleases". We must keep this default until we are sure all controller-runtime
// users have upgraded from the original default ConfigMap lock to a controller-runtime version that has this new
// default. Many users of controller-runtime skip versions, so we should be extremely conservative here.
// Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was
// used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning
// five minor releases, any actively maintained operators are very likely to have a released version that uses
// "configmapsleases". Therefore defaulting to "leases" should be safe.
if options.LeaderElectionResourceLock == "" {
options.LeaderElectionResourceLock = resourcelock.ConfigMapsLeasesResourceLock
options.LeaderElectionResourceLock = resourcelock.LeasesResourceLock
}
// LeaderElectionID must be provided to prevent clashes
@ -118,7 +119,7 @@ func getInClusterNamespace() (string, error) {
}
// Load the namespace file and return its content
namespace, err := ioutil.ReadFile(inClusterNamespacePath)
namespace, err := os.ReadFile(inClusterNamespacePath)
if err != nil {
return "", fmt.Errorf("error reading namespace file: %w", err)
}

View File

@ -73,6 +73,9 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) {
p.logger.lock.Lock()
p.logger.logger = sink
if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
p.logger.logger = withCallDepth.WithCallDepth(1)
}
p.logger.promise = nil
p.logger.lock.Unlock()
@ -141,7 +144,11 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink {
defer l.lock.RUnlock()
if l.promise == nil {
return l.logger.WithName(name)
sink := l.logger.WithName(name)
if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
sink = withCallDepth.WithCallDepth(-1)
}
return sink
}
res := &DelegatingLogSink{logger: l.logger}
@ -157,7 +164,11 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink {
defer l.lock.RUnlock()
if l.promise == nil {
return l.logger.WithValues(tags...)
sink := l.logger.WithValues(tags...)
if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok {
sink = withCallDepth.WithCallDepth(-1)
}
return sink
}
res := &DelegatingLogSink{logger: l.logger}
@ -177,7 +188,7 @@ func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) {
}
// NewDelegatingLogSink constructs a new DelegatingLogSink which uses
// the given logger before it's promise is fulfilled.
// the given logger before its promise is fulfilled.
func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink {
l := &DelegatingLogSink{
logger: initial,

View File

@ -17,7 +17,7 @@ limitations under the License.
// Package log contains utilities for fetching a new logger
// when one is not already available.
//
// The Log Handle
// # The Log Handle
//
// This package contains a root logr.Logger Log. It may be used to
// get a handle to whatever the root logging implementation is. By
@ -25,11 +25,11 @@ limitations under the License.
// to loggers. When the implementation is set using SetLogger, these
// "promises" will be converted over to real loggers.
//
// Logr
// # Logr
//
// All logging in controller-runtime is structured, using a set of interfaces
// defined by a package called logr
// (https://godoc.org/github.com/go-logr/logr). The sub-package zap provides
// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides
// helpers for setting up logr backed by Zap (go.uber.org/zap).
package log

View File

@ -47,7 +47,7 @@ type KubeAPIWarningLogger struct {
}
// HandleWarningHeader handles logging for responses from API server that are
// warnings with code being 299 and uses a logr.Logger for it's logging purposes.
// warnings with code being 299 and uses a logr.Logger for its logging purposes.
func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
if code != 299 || len(message) == 0 {
return

View File

@ -18,6 +18,7 @@ package manager
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
@ -135,12 +136,17 @@ type controllerManager struct {
// if not set, webhook server would look up the server key and certificate in
// {TempDir}/k8s-webhook-server/serving-certs
certDir string
// tlsOpts is used to allow configuring the TLS config used for the webhook server.
tlsOpts []func(*tls.Config)
webhookServer *webhook.Server
// webhookServerOnce will be called in GetWebhookServer() to optionally initialize
// webhookServer if unset, and Add() it to controllerManager.
webhookServerOnce sync.Once
// leaderElectionID is the name of the resource that leader election
// will use for holding the leader lock.
leaderElectionID string
// leaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership.
leaseDuration time.Duration
@ -305,6 +311,7 @@ func (cm *controllerManager) GetWebhookServer() *webhook.Server {
Port: cm.port,
Host: cm.host,
CertDir: cm.certDir,
TLSOpts: cm.tlsOpts,
}
}
if err := cm.Add(cm.webhookServer); err != nil {
@ -402,6 +409,8 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
cm.Unlock()
return errors.New("manager already started")
}
cm.started = true
var ready bool
defer func() {
// Only unlock the manager if we haven't reached
@ -457,21 +466,21 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
// between conversion webhooks and the cache sync (usually initial list) which causes the webhooks
// to never start because no cache can be populated.
if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil {
if err != wait.ErrWaitTimeout {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start and wait for caches.
if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil {
if err != wait.ErrWaitTimeout {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
// Start the non-leaderelection Runnables after the cache has synced.
if err := cm.runnables.Others.Start(cm.internalCtx); err != nil {
if err != wait.ErrWaitTimeout {
if !errors.Is(err, wait.ErrWaitTimeout) {
return err
}
}
@ -587,7 +596,7 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
}()
<-cm.shutdownCtx.Done()
if err := cm.shutdownCtx.Err(); err != nil && err != context.Canceled {
if err := cm.shutdownCtx.Err(); err != nil && !errors.Is(err, context.Canceled) {
if errors.Is(err, context.DeadlineExceeded) {
if cm.gracefulShutdownTimeout > 0 {
return fmt.Errorf("failed waiting for all runnables to end within grace period of %s: %w", cm.gracefulShutdownTimeout, err)
@ -597,6 +606,7 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
// For any other error, return the error.
return err
}
return nil
}
@ -632,6 +642,7 @@ func (cm *controllerManager) startLeaderElection(ctx context.Context) (err error
},
},
ReleaseOnCancel: cm.leaderElectionReleaseOnCancel,
Name: cm.leaderElectionID,
})
if err != nil {
return err

View File

@ -18,6 +18,7 @@ package manager
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
@ -98,9 +99,9 @@ type Manager interface {
// Options are the arguments for creating a new Manager.
type Options struct {
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources
// Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources.
// Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better
// idea to pass your own scheme in. See the documentation in pkg/scheme for more information.
// to pass your own scheme in. See the documentation in pkg/scheme for more information.
Scheme *runtime.Scheme
// MapperProvider provides the rest mapper used to map go types to Kubernetes APIs
@ -142,18 +143,36 @@ type Options struct {
LeaderElection bool
// LeaderElectionResourceLock determines which resource lock to use for leader election,
// defaults to "configmapsleases". Change this value only if you know what you are doing.
// Otherwise, users of your controller might end up with multiple running instances that
// defaults to "leases". Change this value only if you know what you are doing.
//
// If you are using `configmaps`/`endpoints` resource lock and want to migrate to "leases",
// you might do so by migrating to the respective multilock first ("configmapsleases" or "endpointsleases"),
// which will acquire a leader lock on both resources.
// After all your users have migrated to the multilock, you can go ahead and migrate to "leases".
// Please also keep in mind that users might skip versions of your controller.
//
// Note: before controller-runtime version v0.7, it was set to "configmaps".
// And from v0.7 to v0.11, the default was "configmapsleases", which was
// used to migrate from configmaps to leases.
// Since the default was "configmapsleases" for over a year, spanning five minor releases,
// any actively maintained operators are very likely to have a released version that uses
// "configmapsleases". Therefore defaulting to "leases" should be safe since v0.12.
//
// So, what do you have to do when you are updating your controller-runtime dependency
// from a lower version to v0.12 or newer?
// - If your operator matches at least one of these conditions:
// - the LeaderElectionResourceLock in your operator has already been explicitly set to "leases"
// - the old controller-runtime version is between v0.7.0 and v0.11.x and the
// LeaderElectionResourceLock wasn't set or was set to "leases"/"configmapsleases"/"endpointsleases"
// feel free to update controller-runtime to v0.12 or newer.
// - Otherwise, you may have to take these steps:
// 1. update controller-runtime to v0.12 or newer in your go.mod
// 2. set LeaderElectionResourceLock to "configmapsleases" (or "endpointsleases")
// 3. package your operator and upgrade it in all your clusters
// 4. only if you have finished 3, you can remove the LeaderElectionResourceLock to use the default "leases"
// Otherwise, your operator might end up with multiple running instances that
// each acquired leadership through different resource locks during upgrades and thus
// act on the same resources concurrently.
// If you want to migrate to the "leases" resource lock, you might do so by migrating to the
// respective multilock first ("configmapsleases" or "endpointsleases"), which will acquire a
// leader lock on both resources. After all your users have migrated to the multilock, you can
// go ahead and migrate to "leases". Please also keep in mind, that users might skip versions
// of your controller.
//
// Note: before controller-runtime version v0.7, the resource lock was set to "configmaps".
// Please keep this in mind, when planning a proper migration path for your controller.
LeaderElectionResourceLock string
// LeaderElectionNamespace determines the namespace in which the leader
@ -175,6 +194,12 @@ type Options struct {
// LeaseDuration time first.
LeaderElectionReleaseOnCancel bool
// LeaderElectionResourceLockInterface allows to provide a custom resourcelock.Interface that was created outside
// of the controller-runtime. If this value is set the options LeaderElectionID, LeaderElectionNamespace,
// LeaderElectionResourceLock, LeaseDuration, RenewDeadline and RetryPeriod will be ignored. This can be useful if you
// want to use a locking mechanism that is currently not supported, like a MultiLock across two Kubernetes clusters.
LeaderElectionResourceLockInterface resourcelock.Interface
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
// last observed ack. Default is 15 seconds.
@ -186,11 +211,11 @@ type Options struct {
// between tries of actions. Default is 2 seconds.
RetryPeriod *time.Duration
// Namespace if specified restricts the manager's cache to watch objects in
// the desired namespace Defaults to all namespaces
// Namespace, if specified, restricts the manager's cache to watch objects in
// the desired namespace. Defaults to all namespaces.
//
// Note: If a namespace is specified, controllers can still Watch for a
// cluster-scoped resource (e.g Node). For namespaced resources the cache
// cluster-scoped resource (e.g Node). For namespaced resources, the cache
// will only hold objects from the desired namespace.
Namespace string
@ -201,6 +226,7 @@ type Options struct {
// HealthProbeBindAddress is the TCP address that the controller should bind to
// for serving health probes
// It can be set to "0" or "" to disable serving the health probe.
HealthProbeBindAddress string
// Readiness probe endpoint name, defaults to "readyz"
@ -223,12 +249,15 @@ type Options struct {
// It is used to set webhook.Server.CertDir if WebhookServer is not set.
CertDir string
// TLSOpts is used to allow configuring the TLS config used for the webhook server.
TLSOpts []func(*tls.Config)
// WebhookServer is an externally configured webhook.Server. By default,
// a Manager will create a default server using Port, Host, and CertDir;
// if this is set, the Manager will use this server instead.
WebhookServer *webhook.Server
// Functions to all for a user to customize the values that will be injected.
// Functions to allow for a user to customize values that will be injected.
// NewCache is the function that will create the cache to be used
// by the manager. If not set this will use the default new cache function.
@ -239,6 +268,11 @@ type Options struct {
// use the cache for reads and the client for writes.
NewClient cluster.NewClientFunc
// BaseContext is the function that provides Context values to Runnables
// managed by the Manager. If a BaseContext function isn't provided, Runnables
// will receive a new Background Context instead.
BaseContext BaseContextFunc
// ClientDisableCacheFor tells the client that, if any cache is used, to bypass it
// for the given objects.
ClientDisableCacheFor []client.Object
@ -278,6 +312,10 @@ type Options struct {
newHealthProbeListener func(addr string) (net.Listener, error)
}
// BaseContextFunc is a function used to provide a base Context to Runnables
// managed by a Manager.
type BaseContextFunc func() context.Context
// Runnable allows a component to be started.
// It's very important that Start blocks until
// it's done running.
@ -335,18 +373,33 @@ func New(config *rest.Config, options Options) (Manager, error) {
}
// Create the resource lock to enable leader election
leaderConfig := options.LeaderElectionConfig
if leaderConfig == nil {
var leaderConfig *rest.Config
var leaderRecorderProvider *intrec.Provider
if options.LeaderElectionConfig == nil {
leaderConfig = rest.CopyConfig(config)
leaderRecorderProvider = recorderProvider
} else {
leaderConfig = rest.CopyConfig(options.LeaderElectionConfig)
leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster)
if err != nil {
return nil, err
}
}
resourceLock, err := options.newResourceLock(leaderConfig, recorderProvider, leaderelection.Options{
LeaderElection: options.LeaderElection,
LeaderElectionResourceLock: options.LeaderElectionResourceLock,
LeaderElectionID: options.LeaderElectionID,
LeaderElectionNamespace: options.LeaderElectionNamespace,
})
if err != nil {
return nil, err
var resourceLock resourcelock.Interface
if options.LeaderElectionResourceLockInterface != nil && options.LeaderElection {
resourceLock = options.LeaderElectionResourceLockInterface
} else {
resourceLock, err = options.newResourceLock(leaderConfig, leaderRecorderProvider, leaderelection.Options{
LeaderElection: options.LeaderElection,
LeaderElectionResourceLock: options.LeaderElectionResourceLock,
LeaderElectionID: options.LeaderElectionID,
LeaderElectionNamespace: options.LeaderElectionNamespace,
})
if err != nil {
return nil, err
}
}
// Create the metrics listener. This will throw an error if the metrics bind
@ -367,7 +420,7 @@ func New(config *rest.Config, options Options) (Manager, error) {
}
errChan := make(chan error)
runnables := newRunnables(errChan)
runnables := newRunnables(options.BaseContext, errChan)
return &controllerManager{
stopProcedureEngaged: pointer.Int64(0),
@ -384,7 +437,9 @@ func New(config *rest.Config, options Options) (Manager, error) {
port: options.Port,
host: options.Host,
certDir: options.CertDir,
tlsOpts: options.TLSOpts,
webhookServer: options.WebhookServer,
leaderElectionID: options.LeaderElectionID,
leaseDuration: *options.LeaseDuration,
renewDeadline: *options.RenewDeadline,
retryPeriod: *options.RetryPeriod,
@ -475,6 +530,11 @@ func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Opti
}
func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options {
if obj.LeaderElection == nil {
// The source does not have any configuration; noop
return o
}
if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil {
o.LeaderElection = *obj.LeaderElection.LeaderElect
}
@ -514,11 +574,17 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
return nil, fmt.Errorf("error listening on %s: %v", addr, err)
return nil, fmt.Errorf("error listening on %s: %w", addr, err)
}
return ln, nil
}
// defaultBaseContext is used as the BaseContext value in Options if one
// has not already been set.
func defaultBaseContext() context.Context {
return context.Background()
}
// setOptionsDefaults set default values for Options fields.
func setOptionsDefaults(options Options) Options {
// Allow newResourceLock to be mocked
@ -582,5 +648,9 @@ func setOptionsDefaults(options Options) Options {
options.Logger = log.Log
}
if options.BaseContext == nil {
options.BaseContext = defaultBaseContext
}
return options
}
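A hedged end-to-end sketch combining several of the Options touched above. The leader-election ID and the TLS tweak are illustrative, and LeaderElectionResourceLock is only pinned here to show the migration case; omitting it now yields the "leases" default:

package main

import (
	"context"
	"crypto/tls"

	"k8s.io/client-go/tools/leaderelection/resourcelock"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:   true,
		LeaderElectionID: "example-operator.example.com",
		// Set only while migrating older deployments, as described above;
		// omit to get the new "leases" default.
		LeaderElectionResourceLock: resourcelock.ConfigMapsLeasesResourceLock,
		// Every Runnable started by the Manager derives its context from this.
		BaseContext: func() context.Context { return context.Background() },
		// Forwarded to the webhook server's TLS configuration.
		TLSOpts: []func(*tls.Config){
			func(c *tls.Config) { c.MinVersion = tls.VersionTLS12 },
		},
	})
	if err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}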

View File

@ -35,12 +35,12 @@ type runnables struct {
}
// newRunnables creates a new runnables object.
func newRunnables(errChan chan error) *runnables {
func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
return &runnables{
Webhooks: newRunnableGroup(errChan),
Caches: newRunnableGroup(errChan),
LeaderElection: newRunnableGroup(errChan),
Others: newRunnableGroup(errChan),
Webhooks: newRunnableGroup(baseContext, errChan),
Caches: newRunnableGroup(baseContext, errChan),
LeaderElection: newRunnableGroup(baseContext, errChan),
Others: newRunnableGroup(baseContext, errChan),
}
}
@ -100,14 +100,15 @@ type runnableGroup struct {
wg *sync.WaitGroup
}
func newRunnableGroup(errChan chan error) *runnableGroup {
func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup {
r := &runnableGroup{
startReadyCh: make(chan *readyRunnable),
errChan: errChan,
ch: make(chan *readyRunnable),
wg: new(sync.WaitGroup),
}
r.ctx, r.cancel = context.WithCancel(context.Background())
r.ctx, r.cancel = context.WithCancel(baseContext())
return r
}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
/*

View File

@ -22,7 +22,6 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
reflectormetrics "k8s.io/client-go/tools/cache"
clientmetrics "k8s.io/client-go/tools/metrics"
)
@ -37,19 +36,6 @@ const (
ResultKey = "requests_total"
)
// Metrics subsystem and all keys used by the reflectors.
const (
ReflectorSubsystem = "reflector"
ListsTotalKey = "lists_total"
ListsDurationKey = "list_duration_seconds"
ItemsPerListKey = "items_per_list"
WatchesTotalKey = "watches_total"
ShortWatchesTotalKey = "short_watches_total"
WatchDurationKey = "watch_duration_seconds"
ItemsPerWatchKey = "items_per_watch"
LastResourceVersionKey = "last_resource_version"
)
var (
// client metrics.
@ -81,64 +67,10 @@ var (
Name: ResultKey,
Help: "Number of HTTP requests, partitioned by status code, method, and host.",
}, []string{"code", "method", "host"})
// reflector metrics.
// TODO(directxman12): update these to be histograms once the metrics overhaul KEP
// PRs start landing.
listsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Subsystem: ReflectorSubsystem,
Name: ListsTotalKey,
Help: "Total number of API lists done by the reflectors",
}, []string{"name"})
listsDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Subsystem: ReflectorSubsystem,
Name: ListsDurationKey,
Help: "How long an API list takes to return and decode for the reflectors",
}, []string{"name"})
itemsPerList = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Subsystem: ReflectorSubsystem,
Name: ItemsPerListKey,
Help: "How many items an API list returns to the reflectors",
}, []string{"name"})
watchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Subsystem: ReflectorSubsystem,
Name: WatchesTotalKey,
Help: "Total number of API watches done by the reflectors",
}, []string{"name"})
shortWatchesTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
Subsystem: ReflectorSubsystem,
Name: ShortWatchesTotalKey,
Help: "Total number of short API watches done by the reflectors",
}, []string{"name"})
watchDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Subsystem: ReflectorSubsystem,
Name: WatchDurationKey,
Help: "How long an API watch takes to return and decode for the reflectors",
}, []string{"name"})
itemsPerWatch = prometheus.NewSummaryVec(prometheus.SummaryOpts{
Subsystem: ReflectorSubsystem,
Name: ItemsPerWatchKey,
Help: "How many items an API watch returns to the reflectors",
}, []string{"name"})
lastResourceVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Subsystem: ReflectorSubsystem,
Name: LastResourceVersionKey,
Help: "Last resource version seen for the reflectors",
}, []string{"name"})
)
func init() {
registerClientMetrics()
registerReflectorMetrics()
}
// registerClientMetrics sets up the client latency metrics from client-go.
@ -152,20 +84,6 @@ func registerClientMetrics() {
})
}
// registerReflectorMetrics sets up reflector (reconcile) loop metrics.
func registerReflectorMetrics() {
Registry.MustRegister(listsTotal)
Registry.MustRegister(listsDuration)
Registry.MustRegister(itemsPerList)
Registry.MustRegister(watchesTotal)
Registry.MustRegister(shortWatchesTotal)
Registry.MustRegister(watchDuration)
Registry.MustRegister(itemsPerWatch)
Registry.MustRegister(lastResourceVersion)
reflectormetrics.SetReflectorMetricsProvider(reflectorMetricsProvider{})
}
// this section contains adapters, implementations, and other sundry organic, artisanally
// hand-crafted syntax trees required to convince client-go that it actually wants to let
// someone use its metrics.
@ -191,41 +109,3 @@ type resultAdapter struct {
func (r *resultAdapter) Increment(_ context.Context, code, method, host string) {
r.metric.WithLabelValues(code, method, host).Inc()
}
// Reflector metrics provider (method #2 for client-go metrics),
// copied (more-or-less directly) from k8s.io/kubernetes setup code
// (which isn't anywhere in an easily-importable place).
type reflectorMetricsProvider struct{}
func (reflectorMetricsProvider) NewListsMetric(name string) reflectormetrics.CounterMetric {
return listsTotal.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewListDurationMetric(name string) reflectormetrics.SummaryMetric {
return listsDuration.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewItemsInListMetric(name string) reflectormetrics.SummaryMetric {
return itemsPerList.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewWatchesMetric(name string) reflectormetrics.CounterMetric {
return watchesTotal.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewShortWatchesMetric(name string) reflectormetrics.CounterMetric {
return shortWatchesTotal.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewWatchDurationMetric(name string) reflectormetrics.SummaryMetric {
return watchDuration.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewItemsInWatchMetric(name string) reflectormetrics.SummaryMetric {
return itemsPerWatch.WithLabelValues(name)
}
func (reflectorMetricsProvider) NewLastResourceVersionMetric(name string) reflectormetrics.GaugeMetric {
return lastResourceVersion.WithLabelValues(name)
}

View File

@ -0,0 +1,40 @@
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"k8s.io/client-go/tools/leaderelection"
)
// This file is copied and adapted from k8s.io/component-base/metrics/prometheus/clientgo/leaderelection
// which registers metrics to the k8s legacy Registry. We require very
// similar functionality, but must register metrics to a different Registry.
var (
leaderGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "leader_election_master_status",
Help: "Gauge of if the reporting system is master of the relevant lease, 0 indicates backup, 1 indicates master. 'name' is the string used to identify the lease. Please make sure to group by name.",
}, []string{"name"})
)
func init() {
Registry.MustRegister(leaderGauge)
leaderelection.SetProvider(leaderelectionMetricsProvider{})
}
type leaderelectionMetricsProvider struct{}
func (leaderelectionMetricsProvider) NewLeaderMetric() leaderelection.SwitchMetric {
return &switchAdapter{gauge: leaderGauge}
}
type switchAdapter struct {
gauge *prometheus.GaugeVec
}
func (s *switchAdapter) On(name string) {
s.gauge.WithLabelValues(name).Set(1.0)
}
func (s *switchAdapter) Off(name string) {
s.gauge.WithLabelValues(name).Set(0.0)
}

View File

@ -21,8 +21,8 @@ import (
"k8s.io/client-go/util/workqueue"
)
// This file is copied and adapted from k8s.io/kubernetes/pkg/util/workqueue/prometheus
// which registers metrics to the default prometheus Registry. We require very
// This file is copied and adapted from k8s.io/component-base/metrics/prometheus/workqueue
// which registers metrics to the k8s legacy Registry. We require very
// similar functionality, but must register metrics to a different Registry.
// Metrics subsystem and all keys used by the workqueue.

View File

@ -24,6 +24,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
)
var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters")
@ -49,6 +50,7 @@ var _ Predicate = GenerationChangedPredicate{}
var _ Predicate = AnnotationChangedPredicate{}
var _ Predicate = or{}
var _ Predicate = and{}
var _ Predicate = not{}
// Funcs is a function that implements Predicate.
type Funcs struct {
@ -175,9 +177,9 @@ func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool {
// This predicate will skip update events that have no change in the object's annotation.
// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
//
// Controller.Watch(
// Controller.Watch(
// &source.Kind{Type: v1.MyCustomKind},
// &handler.EnqueueRequestForObject{},
// &handler.EnqueueRequestForObject{},
// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}))
//
// This is mostly useful for controllers that need to trigger both when the resource's generation is incremented
@ -206,9 +208,10 @@ func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool {
// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
//
// Controller.Watch(
// &source.Kind{Type: v1.MyCustomKind},
// &handler.EnqueueRequestForObject{},
// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
//
// &source.Kind{Type: v1.MyCustomKind},
// &handler.EnqueueRequestForObject{},
// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
//
// This is helpful when the object's labels carry extra specification information beyond the object's spec,
// and the controller will be triggered by any valid change (not only in the spec, but also in the labels).
@ -239,6 +242,15 @@ type and struct {
predicates []Predicate
}
func (a and) InjectFunc(f inject.Func) error {
for _, p := range a.predicates {
if err := f(p); err != nil {
return err
}
}
return nil
}
func (a and) Create(e event.CreateEvent) bool {
for _, p := range a.predicates {
if !p.Create(e) {
@ -284,6 +296,15 @@ type or struct {
predicates []Predicate
}
func (o or) InjectFunc(f inject.Func) error {
for _, p := range o.predicates {
if err := f(p); err != nil {
return err
}
}
return nil
}
func (o or) Create(e event.CreateEvent) bool {
for _, p := range o.predicates {
if p.Create(e) {
@ -320,6 +341,35 @@ func (o or) Generic(e event.GenericEvent) bool {
return false
}
// Not returns a predicate that implements a logical NOT of the predicate passed to it.
func Not(predicate Predicate) Predicate {
return not{predicate}
}
type not struct {
predicate Predicate
}
func (n not) InjectFunc(f inject.Func) error {
return f(n.predicate)
}
func (n not) Create(e event.CreateEvent) bool {
return !n.predicate.Create(e)
}
func (n not) Update(e event.UpdateEvent) bool {
return !n.predicate.Update(e)
}
func (n not) Delete(e event.DeleteEvent) bool {
return !n.predicate.Delete(e)
}
func (n not) Generic(e event.GenericEvent) bool {
return !n.predicate.Generic(e)
}
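A hedged usage sketch for the new Not predicate, combined with the existing And and LabelSelectorPredicate helpers; the label key is illustrative:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// notPaused filters out objects carrying a hypothetical "pause" label while
// still requiring a generation change.
func notPaused() (predicate.Predicate, error) {
	paused, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{
		MatchLabels: map[string]string{"example.com/paused": "true"},
	})
	if err != nil {
		return nil, err
	}
	return predicate.And(predicate.GenerationChangedPredicate{}, predicate.Not(paused)), nil
}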
// LabelSelectorPredicate constructs a Predicate from a LabelSelector.
// Only objects matching the LabelSelector will be admitted.
func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) {

View File

@ -61,24 +61,24 @@ Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling extern
Example reconcile Logic:
* Read an object and all the Pods it owns.
* Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica.
* Create 4 Pods and set their OwnerReferences to the object.
* Read an object and all the Pods it owns.
* Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica.
* Create 4 Pods and set their OwnerReferences to the object.
reconcile may be implemented as either a type:
type reconcile struct {}
type reconciler struct {}
func (reconcile) reconcile(controller.Request) (controller.Result, error) {
func (reconciler) Reconcile(ctx context.Context, o reconcile.Request) (reconcile.Result, error) {
// Implement business logic of reading and writing objects here
return controller.Result{}, nil
return reconcile.Result{}, nil
}
Or as a function:
controller.Func(func(o controller.Request) (controller.Result, error) {
reconcile.Func(func(ctx context.Context, o reconcile.Request) (reconcile.Result, error) {
// Implement business logic of reading and writing objects here
return controller.Result{}, nil
return reconcile.Result{}, nil
})
Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is

View File

@ -21,37 +21,36 @@ limitations under the License.
// Each API group should define a utility function
// called AddToScheme for adding its types to a Scheme:
//
// // in package myapigroupv1...
// var (
// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme = SchemeBuilder.AddToScheme
// )
// // in package myapigroupv1...
// var (
// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme = SchemeBuilder.AddToScheme
// )
//
// func init() {
// SchemeBuilder.Register(&MyType{}, &MyTypeList)
// }
// var (
// scheme *runtime.Scheme = runtime.NewScheme()
// )
// func init() {
// SchemeBuilder.Register(&MyType{}, &MyTypeList)
// }
// var (
// scheme *runtime.Scheme = runtime.NewScheme()
// )
//
// This also true of the built-in Kubernetes types. Then, in the entrypoint for
// your manager, assemble the scheme containing exactly the types you need,
// panicing if scheme registration failed. For instance, if our controller needs
// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
//
// func init() {
// utilruntime.Must(myapigroupv1.AddToScheme(scheme))
// utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
// }
//
// func main() {
// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
// Scheme: scheme,
// })
// // ...
// }
// func init() {
// utilruntime.Must(myapigroupv1.AddToScheme(scheme))
// utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
// }
//
// func main() {
// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
// Scheme: scheme,
// })
// // ...
// }
package scheme
import (

View File

@ -24,6 +24,7 @@ import (
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -82,6 +83,10 @@ func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler
return ks.kind.Start(ctx, handler, queue, prct...)
}
func (ks *kindWithCache) String() string {
return ks.kind.String()
}
func (ks *kindWithCache) WaitForSync(ctx context.Context) error {
return ks.kind.WaitForSync(ctx)
}
@ -133,9 +138,14 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w
i, lastErr = ks.cache.GetInformer(ctx, ks.Type)
if lastErr != nil {
kindMatchErr := &meta.NoKindMatchError{}
if errors.As(lastErr, &kindMatchErr) {
switch {
case errors.As(lastErr, &kindMatchErr):
log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start",
"kind", kindMatchErr.GroupKind)
case runtime.IsNotRegisteredError(lastErr):
log.Error(lastErr, "kind must be registered to the Scheme")
default:
log.Error(lastErr, "failed to get informer from cache")
}
return false, nil // Retry.
}
@ -149,7 +159,11 @@ func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue w
return
}
i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
_, err := i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
if err != nil {
ks.started <- err
return
}
if !ks.cache.WaitForCacheSync(ctx) {
// Would be great to return something more informative here
ks.started <- errors.New("cache did not sync")
@ -175,6 +189,9 @@ func (ks *Kind) WaitForSync(ctx context.Context) error {
return err
case <-ctx.Done():
ks.startCancel()
if errors.Is(ctx.Err(), context.Canceled) {
return nil
}
return errors.New("timed out waiting for cache to be synced")
}
}
@ -342,7 +359,10 @@ func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, que
return fmt.Errorf("must specify Informer.Informer")
}
is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
_, err := is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct})
if err != nil {
return err
}
return nil
}

View File

@ -21,6 +21,8 @@ import (
"encoding/json"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@ -56,6 +58,17 @@ func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response {
panic("defaulter should never be nil")
}
// always skip when a DELETE operation received in mutation handler
// describe in https://github.com/kubernetes-sigs/controller-runtime/issues/1762
if req.Operation == admissionv1.Delete {
return Response{AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Code: http.StatusOK,
},
}}
}
// Get the object in the request
obj := h.defaulter.DeepCopyObject().(Defaulter)
if err := h.decoder.Decode(req, obj); err != nil {

View File

@ -19,11 +19,12 @@ package admission
import (
"context"
"encoding/json"
"errors"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@ -61,6 +62,18 @@ func (h *defaulterForType) Handle(ctx context.Context, req Request) Response {
panic("object should never be nil")
}
// Always skip when a DELETE operation received in custom mutation handler.
if req.Operation == admissionv1.Delete {
return Response{AdmissionResponse: admissionv1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Code: http.StatusOK,
},
}}
}
ctx = NewContextWithRequest(ctx, req)
// Get the object in the request
obj := h.object.DeepCopyObject()
if err := h.decoder.Decode(req, obj); err != nil {

View File

@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
v1 "k8s.io/api/admission/v1"
@ -60,7 +59,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
defer r.Body.Close()
if body, err = ioutil.ReadAll(r.Body); err != nil {
if body, err = io.ReadAll(r.Body); err != nil {
wh.log.Error(err, "unable to read the body from the incoming request")
reviewResponse = Errored(http.StatusBadRequest, err)
wh.writeResponse(w, reviewResponse)
@ -125,8 +124,16 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK
// writeAdmissionResponse writes ar to w.
func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) {
if err := json.NewEncoder(w).Encode(ar); err != nil {
wh.log.Error(err, "unable to encode the response")
wh.writeResponse(w, Errored(http.StatusInternalServerError, err))
wh.log.Error(err, "unable to encode and write the response")
// Since `ar v1.AdmissionReview` is a well-formed, valid object,
// marshalling it into bytes should not fail.
// The error here is most likely caused by an abnormal HTTP connection,
// e.g., a broken pipe, so we only write the error response once
// to avoid an endless cycle of calls.
serverError := Errored(http.StatusInternalServerError, err)
if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil {
wh.log.Error(err, "still unable to encode and write the InternalServerError response")
}
} else {
res := ar.Response
if log := wh.log; log.V(1).Enabled() {

View File

@ -64,6 +64,8 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response {
panic("object should never be nil")
}
ctx = NewContextWithRequest(ctx, req)
// Get the object in the request
obj := h.object.DeepCopyObject()

View File

@ -19,6 +19,7 @@ package admission
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/go-logr/logr"
@ -27,6 +28,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
@ -121,6 +123,9 @@ type Webhook struct {
// and potentially patches to apply to the handler.
Handler Handler
// RecoverPanic indicates whether the panic caused by webhook should be recovered.
RecoverPanic bool
// WithContextFunc will allow you to take the http.Request.Context() and
// add any additional information such as passing the request path or
// headers thus allowing you to read them from within the handler
@ -138,11 +143,29 @@ func (wh *Webhook) InjectLogger(l logr.Logger) error {
return nil
}
// WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered.
func (wh *Webhook) WithRecoverPanic(recoverPanic bool) *Webhook {
wh.RecoverPanic = recoverPanic
return wh
}
// Handle processes AdmissionRequest.
// If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches.
// If the webhook is validating type, it delegates the AdmissionRequest to each handler and
// deny the request if anyone denies.
func (wh *Webhook) Handle(ctx context.Context, req Request) Response {
func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) {
if wh.RecoverPanic {
defer func() {
if r := recover(); r != nil {
for _, fn := range utilruntime.PanicHandlers {
fn(r)
}
response = Errored(http.StatusInternalServerError, fmt.Errorf("panic: %v [recovered]", r))
return
}
}()
}
resp := wh.Handler.Handle(ctx, req)
if err := resp.Complete(req); err != nil {
wh.log.Error(err, "unable to encode response")
@ -253,3 +276,21 @@ func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, err
}
return metrics.InstrumentedHook(opts.MetricsPath, hook), nil
}
// requestContextKey is how we find the admission.Request in a context.Context.
type requestContextKey struct{}
// RequestFromContext returns an admission.Request from ctx.
func RequestFromContext(ctx context.Context) (Request, error) {
if v, ok := ctx.Value(requestContextKey{}).(Request); ok {
return v, nil
}
return Request{}, errors.New("admission.Request not found in context")
}
// NewContextWithRequest returns a new Context, derived from ctx, which carries the
// provided admission.Request.
func NewContextWithRequest(ctx context.Context, req Request) context.Context {
return context.WithValue(ctx, requestContextKey{}, req)
}
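A hedged sketch tying the additions together: a CustomDefaulter, served by the custom-defaulter path shown earlier (which now calls NewContextWithRequest), reads the admission.Request back out of its context. The Pod annotation key is illustrative:

package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

type podDefaulter struct{}

var _ admission.CustomDefaulter = podDefaulter{}

// Default records which admission request mutated the Pod.
func (podDefaulter) Default(ctx context.Context, obj runtime.Object) error {
	req, err := admission.RequestFromContext(ctx)
	if err != nil {
		return err
	}
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return fmt.Errorf("expected a Pod but got a %T", obj)
	}
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations["example.com/admission-uid"] = string(req.UID)
	return nil
}

Such a defaulter is wired into a webhook via the package's custom-defaulter constructor, and the resulting *Webhook can additionally opt into panic recovery with WithRecoverPanic(true).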

View File

@ -21,7 +21,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
@ -75,8 +74,12 @@ type Server struct {
// TLSVersion is the minimum version of TLS supported. Accepts
// "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility)
// Deprecated: Use TLSOpts instead.
TLSMinVersion string
// TLSOpts is used to allow configuring the TLS config used for the server
TLSOpts []func(*tls.Config)
// WebhookMux is the multiplexer that handles different webhooks.
WebhookMux *http.ServeMux
@ -241,9 +244,9 @@ func (s *Server) Start(ctx context.Context) error {
// load CA to verify client certificate
if s.ClientCAName != "" {
certPool := x509.NewCertPool()
clientCABytes, err := ioutil.ReadFile(filepath.Join(s.CertDir, s.ClientCAName))
clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName))
if err != nil {
return fmt.Errorf("failed to read client CA cert: %v", err)
return fmt.Errorf("failed to read client CA cert: %w", err)
}
ok := certPool.AppendCertsFromPEM(clientCABytes)
@ -255,6 +258,11 @@ func (s *Server) Start(ctx context.Context) error {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
}
// fallback TLS config ready, will now mutate if passer wants full control over it
for _, op := range s.TLSOpts {
op(cfg)
}
listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg)
if err != nil {
return err
@ -292,7 +300,7 @@ func (s *Server) Start(ctx context.Context) error {
// server has been started.
func (s *Server) StartedChecker() healthz.Checker {
config := &tls.Config{
InsecureSkipVerify: true, // nolint:gosec // config is used to connect to our own webhook port.
InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own webhook port.
}
return func(req *http.Request) error {
s.mu.Lock()
@ -305,11 +313,11 @@ func (s *Server) StartedChecker() healthz.Checker {
d := &net.Dialer{Timeout: 10 * time.Second}
conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config)
if err != nil {
return fmt.Errorf("webhook server is not reachable: %v", err)
return fmt.Errorf("webhook server is not reachable: %w", err)
}
if err := conn.Close(); err != nil {
return fmt.Errorf("webhook server is not reachable: closing connection: %v", err)
return fmt.Errorf("webhook server is not reachable: closing connection: %w", err)
}
return nil
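A short hedged sketch of the new hook on the server itself, tightening the fallback TLS config instead of relying on the deprecated TLSMinVersion string; the port and TLS version are illustrative:

package example

import (
	"crypto/tls"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// newServer builds a webhook server whose generated TLS config is mutated
// by the TLSOpts callbacks right before the listener is created.
func newServer() *webhook.Server {
	return &webhook.Server{
		Port: 9443,
		TLSOpts: []func(*tls.Config){
			func(c *tls.Config) { c.MinVersion = tls.VersionTLS13 },
		},
	}
}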