From b1a45909673faea2412ad0618b084555daca1dfe Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Thu, 1 Jun 2023 19:01:19 +0200
Subject: [PATCH] rebase: update sigs.k8s.io/controller-runtime to current version

There is no release for sigs.k8s.io/controller-runtime that supports
Kubernetes v1.27. The main branch has all the required modifications, so we
can use that for the time being.

Signed-off-by: Niels de Vos
---
 go.mod | 6 +-
 go.sum | 9 +-
 .../persistentvolume/persistentvolume.go | 2 +-
 .../prometheus/client_model/go/metrics.pb.go | 1586 +++++++++++------
 vendor/modules.txt | 14 +-
 .../controller-runtime/pkg/cache/cache.go | 488 ++---
 .../pkg/cache/informer_cache.go | 84 +-
 .../pkg/cache/internal/cache_reader.go | 4 +-
 .../pkg/cache/internal/deleg_map.go | 126 --
 .../pkg/cache/internal/disabledeepcopy.go | 35 -
 .../pkg/cache/internal/informers.go | 560 ++++++
 .../pkg/cache/internal/informers_map.go | 480 -----
 .../pkg/cache/internal/selector.go | 15 -
 .../pkg/cache/internal/transformers.go | 8 +-
 .../pkg/cache/multi_namespace_cache.go | 100 +-
 .../pkg/certwatcher/certwatcher.go | 52 +-
 .../pkg/client/apiutil/apimachinery.go | 66 +-
 .../pkg/client/apiutil/dynamicrestmapper.go | 301 ----
 .../{lazyrestmapper.go => restmapper.go} | 173 +-
 .../controller-runtime/pkg/client/client.go | 181 +-
 ...ient_cache.go => client_rest_resources.go} | 27 +-
 .../pkg/client/config/config.go | 6 +-
 .../controller-runtime/pkg/client/doc.go | 3 +-
 .../controller-runtime/pkg/client/dryrun.go | 11 +
 .../pkg/client/interfaces.go | 5 +
 .../pkg/client/namespaced_client.go | 33 +-
 .../controller-runtime/pkg/client/options.go | 10 +
 .../controller-runtime/pkg/client/split.go | 143 --
 .../pkg/client/typed_client.go | 26 +-
 .../pkg/client/unstructured_client.go | 79 +-
 .../controller-runtime/pkg/client/watch.go | 30 +-
 .../controller-runtime/pkg/cluster/cluster.go | 187 +-
 .../pkg/cluster/internal.go | 41 +-
 .../controller-runtime/pkg/config/config.go | 12 +-
 .../pkg/config/controller.go | 49 +
 .../controller-runtime/pkg/config/doc.go | 10 +-
 .../pkg/config/v1alpha1/doc.go | 2 +
 .../pkg/config/v1alpha1/register.go | 6 +
 .../pkg/config/v1alpha1/types.go | 19 +-
 .../pkg/controller/controller.go | 43 +-
 .../controller-runtime/pkg/handler/enqueue.go | 10 +-
 .../pkg/handler/enqueue_mapped.go | 37 +-
 .../pkg/handler/enqueue_owner.go | 86 +-
 .../pkg/handler/eventhandler.go | 34 +-
 .../pkg/internal/controller/controller.go | 43 +-
 .../internal/controller/metrics/metrics.go | 8 +
 .../pkg/internal/objectutil/objectutil.go | 78 -
 .../pkg/internal/recorder/recorder.go | 9 +-
 .../source/event_handler.go} | 58 +-
 .../pkg/internal/source/kind.go | 117 ++
 .../controller-runtime/pkg/log/deleg.go | 40 +-
 .../controller-runtime/pkg/log/log.go | 46 +-
 .../pkg/manager/internal.go | 120 +-
 .../controller-runtime/pkg/manager/manager.go | 176 +-
 .../pkg/manager/runnable_group.go | 2 +-
 .../controller-runtime/pkg/manager/server.go | 61 +
 .../pkg/metrics/client_go_adapter.go | 89 +-
 .../pkg/predicate/predicate.go | 23 -
 .../pkg/reconcile/reconcile.go | 24 +
 .../pkg/runtime/inject/doc.go | 22 -
 .../pkg/runtime/inject/inject.go | 164 --
 .../controller-runtime/pkg/source/source.go | 183 +-
 .../pkg/webhook/admission/decode.go | 16 +-
 .../pkg/webhook/admission/defaulter.go | 15 +-
 .../pkg/webhook/admission/defaulter_custom.go | 14 +-
 .../pkg/webhook/admission/doc.go | 6 -
 .../pkg/webhook/admission/http.go | 20 +-
 .../pkg/webhook/admission/inject.go | 31 -
 .../pkg/webhook/admission/multi.go | 52 -
 .../pkg/webhook/admission/response.go | 23 +-
 .../pkg/webhook/admission/validator.go | 105 +-
 .../pkg/webhook/admission/validator_custom.go | 53 +-
 .../pkg/webhook/admission/webhook.go | 119 +-
 .../controller-runtime/pkg/webhook/server.go | 237 ++-
 74 files changed, 3412 insertions(+), 3741 deletions(-)
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
 rename vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/{lazyrestmapper.go => restmapper.go} (57%)
 rename vendor/sigs.k8s.io/controller-runtime/pkg/client/{client_cache.go => client_rest_resources.go} (82%)
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go
 rename vendor/sigs.k8s.io/controller-runtime/pkg/{source/internal/eventsource.go => internal/source/event_handler.go} (67%)
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go
 create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go

diff --git a/go.mod b/go.mod
index b424ccb7d..aefdbfa8e 100644
--- a/go.mod
+++ b/go.mod
@@ -44,7 +44,7 @@ require (
 	k8s.io/mount-utils v0.27.2
 	k8s.io/pod-security-admission v0.0.0
 	k8s.io/utils v0.0.0-20230209194617-a36077c30491
-	sigs.k8s.io/controller-runtime v0.14.6
+	sigs.k8s.io/controller-runtime v0.15.1-0.20230524200249-30eae58f1b98
 )
 
 require (
@@ -128,7 +128,7 @@ require (
 	github.com/pierrec/lz4 v2.6.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_model v0.3.0 // indirect
+	github.com/prometheus/client_model v0.4.0 // indirect
 	github.com/prometheus/common v0.42.0 // indirect
 	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/ryanuber/go-glob v1.0.0 // indirect
@@ -156,7 +156,7 @@ require (
 	golang.org/x/term v0.8.0 // indirect
 	golang.org/x/text v0.9.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
+	gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
 	google.golang.org/api v0.110.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
diff --git a/go.sum b/go.sum
index 0e4e29b50..fb86dd618 100644
--- a/go.sum
+++ b/go.sum
@@ -366,8 +366,8 @@ github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
-github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
 github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
+github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo=
 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
 github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
 github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
@@ -987,8 +987,9 @@ github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -1868,8 +1869,8 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
 sigs.k8s.io/controller-runtime v0.2.2/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I=
-sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
-sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
+sigs.k8s.io/controller-runtime v0.15.1-0.20230524200249-30eae58f1b98 h1:g/rUFhTxYK9gaf9sIzuZhp8Hc0dc1mTIOAEtrAlBYgc=
+sigs.k8s.io/controller-runtime v0.15.1-0.20230524200249-30eae58f1b98/go.mod h1:fnVc7My+0bTh/Y9bft09N4Fqom6WiMQMdCaayy4l/oM=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
diff --git a/internal/controller/persistentvolume/persistentvolume.go b/internal/controller/persistentvolume/persistentvolume.go
index 4a3295d8b..d83fb0a4f 100644
--- a/internal/controller/persistentvolume/persistentvolume.go
+++ b/internal/controller/persistentvolume/persistentvolume.go
@@ -82,7 +82,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	}
 
 	// Watch for changes to PersistentVolumes
-	err = c.Watch(&source.Kind{Type: &corev1.PersistentVolume{}}, &handler.EnqueueRequestForObject{})
+	err = c.Watch(source.Kind(mgr.GetCache(), &corev1.PersistentVolume{}), &handler.EnqueueRequestForObject{})
 	if err != nil {
 		return fmt.Errorf("failed to watch the changes: %w", err)
 	}
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 35904ea19..2b5bca4b9 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,25 +1,38 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 // source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 @@ -38,23 +51,25 @@ const ( MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} +// Enum value maps for MetricType. 
+var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] +} + +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. 
func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` } -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} } -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` } -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} } -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and // then each power of two is divided into 2^n logarithmic buckets. // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). // In the future, more bucket schemas may be added using numbers < -4 or > 8. Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. // Negative buckets for the native histogram. NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` // Use either "negative_delta" or "negative_count", the former for // regular histograms with integer counts, the latter for float // histograms. 
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket } return nil } -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema } return 0 } -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold } return 0 } -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + return *x.ZeroCount } return 0 } -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta } return nil } -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount } return nil } -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan } return nil } -func (m *Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta +func (x *Histogram) GetPositiveDelta() []int64 { + if x != 
nil { + return x.PositiveDelta } return nil } -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount } return nil } @@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 { // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. 
func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount } return 0 } -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } @@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar { // structured here (with all the buckets in a single array separate // from the Spans). type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. } -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. 
func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} } -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) -} -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) -} -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) -} - -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo - -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset } return 0 } -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length } return 0 } type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` } -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + 
} + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` } -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetricFamily) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetricFamily) ProtoMessage() {} + +func 
(x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, + 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 
0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, + 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, + 0x65, 
0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, + 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, + 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, + 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, + 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, + 0x07, 0x75, 0x6e, 0x74, 0x79, 
0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, + 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, + 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, + 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, + 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, + 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, + 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, + 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, + 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, } -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData } -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 
0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 
0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 6: 
io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 83ab87c62..e89b9ddb4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -498,8 +498,8 @@ github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.3.0 -## explicit; go 1.9 +# github.com/prometheus/client_model v0.4.0 +## explicit; go 1.18 github.com/prometheus/client_model/go # github.com/prometheus/common v0.42.0 ## explicit; go 1.18 @@ -705,7 +705,7 @@ golang.org/x/text/width # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# gomodules.xyz/jsonpatch/v2 v2.2.0 => github.com/gomodules/jsonpatch/v2 v2.2.0 +# gomodules.xyz/jsonpatch/v2 v2.3.0 => github.com/gomodules/jsonpatch/v2 v2.2.0 ## explicit; go 1.12 gomodules.xyz/jsonpatch/v2 # google.golang.org/api v0.110.0 @@ -1560,8 +1560,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/controller-runtime v0.14.6 -## explicit; go 1.19 +# sigs.k8s.io/controller-runtime v0.15.1-0.20230524200249-30eae58f1b98 +## explicit; go 1.20 sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal sigs.k8s.io/controller-runtime/pkg/certwatcher @@ -1581,8 +1581,8 @@ sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics 
sigs.k8s.io/controller-runtime/pkg/internal/field/selector sigs.k8s.io/controller-runtime/pkg/internal/httpserver sigs.k8s.io/controller-runtime/pkg/internal/log -sigs.k8s.io/controller-runtime/pkg/internal/objectutil sigs.k8s.io/controller-runtime/pkg/internal/recorder +sigs.k8s.io/controller-runtime/pkg/internal/source sigs.k8s.io/controller-runtime/pkg/leaderelection sigs.k8s.io/controller-runtime/pkg/log sigs.k8s.io/controller-runtime/pkg/manager @@ -1592,10 +1592,8 @@ sigs.k8s.io/controller-runtime/pkg/predicate sigs.k8s.io/controller-runtime/pkg/ratelimiter sigs.k8s.io/controller-runtime/pkg/reconcile sigs.k8s.io/controller-runtime/pkg/recorder -sigs.k8s.io/controller-runtime/pkg/runtime/inject sigs.k8s.io/controller-runtime/pkg/scheme sigs.k8s.io/controller-runtime/pkg/source -sigs.k8s.io/controller-runtime/pkg/source/internal sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index bcb1141a5..f01de4381 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -19,10 +19,11 @@ package cache import ( "context" "fmt" - "reflect" + "net/http" "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -37,7 +38,10 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) -var log = logf.RuntimeLog.WithName("object-cache") +var ( + log = logf.RuntimeLog.WithName("object-cache") + defaultSyncPeriod = 10 * time.Hour +) // Cache knows how to load Kubernetes objects, fetch informers to request // to receive events for Kubernetes objects (at a low-level), @@ -98,310 +102,152 @@ type Informer interface { HasSynced() bool } -// ObjectSelector is an alias name of internal.Selector. -type ObjectSelector internal.Selector - -// SelectorsByObject associate a client.Object's GVK to a field/label selector. -// There is also `DefaultSelector` to set a global default (which will be overridden by -// a more specific setting here, if any). -type SelectorsByObject map[client.Object]ObjectSelector - // Options are the optional arguments for creating a new InformersMap object. type Options struct { + // HTTPClient is the http client to use for the REST client + HTTPClient *http.Client + // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources Mapper meta.RESTMapper - // Resync is the base frequency the informers are resynced. - // Defaults to defaultResyncTime. - // A 10 percent jitter will be added to the Resync period between informers - // So that all informers will not send list requests simultaneously. - Resync *time.Duration + // SyncPeriod determines the minimum frequency at which watched resources are + // reconciled. A lower period will correct entropy more quickly, but reduce + // responsiveness to change if there are many watched resources. Change this + // value only if you know what you are doing. Defaults to 10 hours if unset. + // there will a 10 percent jitter between the SyncPeriod of all controllers + // so that all controllers will not send list requests simultaneously. + // + // This applies to all controllers. 
+ // + // A period sync happens for two reasons: + // 1. To insure against a bug in the controller that causes an object to not + // be requeued, when it otherwise should be requeued. + // 2. To insure against an unknown bug in controller-runtime, or its dependencies, + // that causes an object to not be requeued, when it otherwise should be + // requeued, or to be removed from the queue, when it otherwise should not + // be removed. + // + // If you want + // 1. to insure against missed watch events, or + // 2. to poll services that cannot be watched, + // then we recommend that, instead of changing the default period, the + // controller requeue, with a constant duration `t`, whenever the controller + // is "done" with an object, and would otherwise not requeue it, i.e., we + // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, + // instead of `reconcile.Result{}`. + SyncPeriod *time.Duration - // Namespace restricts the cache's ListWatch to the desired namespace + // Namespaces restricts the cache's ListWatch to the desired namespaces // Default watches all namespaces - Namespace string + Namespaces []string - // SelectorsByObject restricts the cache's ListWatch to the desired - // fields per GVK at the specified object, the map's value must implement - // Selector [1] using for example a Set [2] - // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector - // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set - SelectorsByObject SelectorsByObject + // DefaultLabelSelector will be used as a label selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultLabelSelector labels.Selector - // DefaultSelector will be used as selectors for all object types - // that do not have a selector in SelectorsByObject defined. - DefaultSelector ObjectSelector + // DefaultFieldSelector will be used as a field selectors for all object types + // unless they have a more specific selector set in ByObject. + DefaultFieldSelector fields.Selector - // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or - // list objects per GVK at the specified object. + // DefaultTransform will be used as transform for all object types + // unless they have a more specific transform set in ByObject. + DefaultTransform toolscache.TransformFunc + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + ByObject map[client.Object]ByObject + + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects for EVERY object. // Be very careful with this, when enabled you must DeepCopy any object before mutating it, // otherwise you will mutate the object in the cache. - UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + // + // This is a global setting for all objects, and can be overridden by the ByObject setting. + UnsafeDisableDeepCopy *bool +} - // TransformByObject is a map from GVKs to transformer functions which +// ByObject offers more fine-grained control over the cache's ListWatch by object. +type ByObject struct { + // Label represents a label selector for the object. + Label labels.Selector + + // Field represents a field selector for the object. + Field fields.Selector + + // Transform is a map from objects to transformer functions which // get applied when objects of the transformation are about to be committed // to cache. // // This function is called both for new objects to enter the cache, - // and for updated objects. 
- TransformByObject TransformByObject + // and for updated objects. + Transform toolscache.TransformFunc - // DefaultTransform is the transform used for all GVKs which do - // not have an explicit transform func set in TransformByObject - DefaultTransform toolscache.TransformFunc + // UnsafeDisableDeepCopy indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopy *bool } -var defaultResyncTime = 10 * time.Hour +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) // New initializes and returns a new Cache. func New(config *rest.Config, opts Options) (Cache, error) { + if len(opts.Namespaces) == 0 { + opts.Namespaces = []string{metav1.NamespaceAll} + } + if len(opts.Namespaces) > 1 { + return newMultiNamespaceCache(config, opts) + } + opts, err := defaultOpts(config, opts) if err != nil { return nil, err } - selectorsByGVK, err := convertToByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) + + byGVK, err := convertToInformerOptsByGVK(opts.ByObject, opts.Scheme) if err != nil { return nil, err } - disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) - if err != nil { - return nil, err - } - transformByGVK, err := convertToByGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) - if err != nil { - return nil, err - } - transformByObj := internal.TransformFuncByObjectFromMap(transformByGVK) - - internalSelectorsByGVK := internal.SelectorsByGVK{} - for gvk, selector := range selectorsByGVK { - internalSelectorsByGVK[gvk] = internal.Selector(selector) + // Set the default selector and transform. + byGVK[schema.GroupVersionKind{}] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Label: opts.DefaultLabelSelector, + Field: opts.DefaultFieldSelector, + }, + Transform: opts.DefaultTransform, + UnsafeDisableDeepCopy: opts.UnsafeDisableDeepCopy, } - im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, internalSelectorsByGVK, disableDeepCopyByGVK, transformByObj) - return &informerCache{InformersMap: im}, nil -} - -// BuilderWithOptions returns a Cache constructor that will build a cache -// honoring the options argument, this is useful to specify options like -// SelectorsByObject -// WARNING: If SelectorsByObject is specified, filtered out resources are not -// returned. -// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object -// returned from cache get/list before mutating it. 
-func BuilderWithOptions(options Options) NewCacheFunc { - return func(config *rest.Config, inherited Options) (Cache, error) { - var err error - inherited, err = defaultOpts(config, inherited) - if err != nil { - return nil, err - } - options, err = defaultOpts(config, options) - if err != nil { - return nil, err - } - combined, err := options.inheritFrom(inherited) - if err != nil { - return nil, err - } - return New(config, *combined) - } -} - -func (options Options) inheritFrom(inherited Options) (*Options, error) { - var ( - combined Options - err error - ) - combined.Scheme = combineScheme(inherited.Scheme, options.Scheme) - combined.Mapper = selectMapper(inherited.Mapper, options.Mapper) - combined.Resync = selectResync(inherited.Resync, options.Resync) - combined.Namespace = selectNamespace(inherited.Namespace, options.Namespace) - combined.SelectorsByObject, combined.DefaultSelector, err = combineSelectors(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.UnsafeDisableDeepCopyByObject, err = combineUnsafeDeepCopy(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - combined.TransformByObject, combined.DefaultTransform, err = combineTransforms(inherited, options, combined.Scheme) - if err != nil { - return nil, err - } - return &combined, nil -} - -func combineScheme(schemes ...*runtime.Scheme) *runtime.Scheme { - var out *runtime.Scheme - for _, sch := range schemes { - if sch == nil { - continue - } - for gvk, t := range sch.AllKnownTypes() { - if out == nil { - out = runtime.NewScheme() - } - out.AddKnownTypeWithName(gvk, reflect.New(t).Interface().(runtime.Object)) - } - } - return out -} - -func selectMapper(def, override meta.RESTMapper) meta.RESTMapper { - if override != nil { - return override - } - return def -} - -func selectResync(def, override *time.Duration) *time.Duration { - if override != nil { - return override - } - return def -} - -func selectNamespace(def, override string) string { - if override != "" { - return override - } - return def -} - -func combineSelectors(inherited, options Options, scheme *runtime.Scheme) (SelectorsByObject, ObjectSelector, error) { - // Selectors are combined via logical AND. - // - Combined label selector is a union of the selectors requirements from both sets of options. - // - Combined field selector uses fields.AndSelectors with the combined list of non-nil field selectors - // defined in both sets of options. 
- // - // There is a bunch of complexity here because we need to convert to SelectorsByGVK - // to be able to match keys between options and inherited and then convert back to SelectorsByObject - optionsSelectorsByGVK, err := convertToByGVK(options.SelectorsByObject, options.DefaultSelector, scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - inheritedSelectorsByGVK, err := convertToByGVK(inherited.SelectorsByObject, inherited.DefaultSelector, inherited.Scheme) - if err != nil { - return nil, ObjectSelector{}, err - } - - for gvk, inheritedSelector := range inheritedSelectorsByGVK { - optionsSelectorsByGVK[gvk] = combineSelector(inheritedSelector, optionsSelectorsByGVK[gvk]) - } - return convertToByObject(optionsSelectorsByGVK, scheme) -} - -func combineSelector(selectors ...ObjectSelector) ObjectSelector { - ls := make([]labels.Selector, 0, len(selectors)) - fs := make([]fields.Selector, 0, len(selectors)) - for _, s := range selectors { - ls = append(ls, s.Label) - fs = append(fs, s.Field) - } - return ObjectSelector{ - Label: combineLabelSelectors(ls...), - Field: combineFieldSelectors(fs...), - } -} - -func combineLabelSelectors(ls ...labels.Selector) labels.Selector { - var combined labels.Selector - for _, l := range ls { - if l == nil { - continue - } - if combined == nil { - combined = labels.NewSelector() - } - reqs, _ := l.Requirements() - combined = combined.Add(reqs...) - } - return combined -} - -func combineFieldSelectors(fs ...fields.Selector) fields.Selector { - nonNil := fs[:0] - for _, f := range fs { - if f == nil { - continue - } - nonNil = append(nonNil, f) - } - if len(nonNil) == 0 { - return nil - } - if len(nonNil) == 1 { - return nonNil[0] - } - return fields.AndSelectors(nonNil...) -} - -func combineUnsafeDeepCopy(inherited, options Options, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - // UnsafeDisableDeepCopyByObject is combined via precedence. Only if a value for a particular GVK is unset - // in options will a value from inherited be used. - optionsDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(options.UnsafeDisableDeepCopyByObject, options.Scheme) - if err != nil { - return nil, err - } - inheritedDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(inherited.UnsafeDisableDeepCopyByObject, inherited.Scheme) - if err != nil { - return nil, err - } - - for gvk, inheritedDeepCopy := range inheritedDisableDeepCopyByGVK { - if _, ok := optionsDisableDeepCopyByGVK[gvk]; !ok { - if optionsDisableDeepCopyByGVK == nil { - optionsDisableDeepCopyByGVK = map[schema.GroupVersionKind]bool{} - } - optionsDisableDeepCopyByGVK[gvk] = inheritedDeepCopy - } - } - return convertToDisableDeepCopyByObject(optionsDisableDeepCopyByGVK, scheme) -} - -func combineTransforms(inherited, options Options, scheme *runtime.Scheme) (TransformByObject, toolscache.TransformFunc, error) { - // Transform functions are combined via chaining. If both inherited and options define a transform - // function, the transform function from inherited will be called first, and the transform function from - // options will be called second. 
- optionsTransformByGVK, err := convertToByGVK(options.TransformByObject, options.DefaultTransform, options.Scheme) - if err != nil { - return nil, nil, err - } - inheritedTransformByGVK, err := convertToByGVK(inherited.TransformByObject, inherited.DefaultTransform, inherited.Scheme) - if err != nil { - return nil, nil, err - } - - for gvk, inheritedTransform := range inheritedTransformByGVK { - if optionsTransformByGVK == nil { - optionsTransformByGVK = map[schema.GroupVersionKind]toolscache.TransformFunc{} - } - optionsTransformByGVK[gvk] = combineTransform(inheritedTransform, optionsTransformByGVK[gvk]) - } - return convertToByObject(optionsTransformByGVK, scheme) -} - -func combineTransform(inherited, current toolscache.TransformFunc) toolscache.TransformFunc { - if inherited == nil { - return current - } - if current == nil { - return inherited - } - return func(in interface{}) (interface{}, error) { - mid, err := inherited(in) - if err != nil { - return nil, err - } - return current(mid) - } + return &informerCache{ + scheme: opts.Scheme, + Informers: internal.NewInformers(config, &internal.InformersOpts{ + HTTPClient: opts.HTTPClient, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + ResyncPeriod: *opts.SyncPeriod, + Namespace: opts.Namespaces[0], + ByGVK: byGVK, + }), + }, nil } func defaultOpts(config *rest.Config, opts Options) (Options, error) { + logger := log.WithName("setup") + + // Use the rest HTTP client for the provided config if unset + if opts.HTTPClient == nil { + var err error + opts.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + logger.Error(err, "Failed to get HTTP client") + return opts, fmt.Errorf("could not create HTTP client from config: %w", err) + } + } + // Use the default Kubernetes Scheme if unset if opts.Scheme == nil { opts.Scheme = scheme.Scheme @@ -410,108 +256,38 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { // Construct a new Mapper if unset if opts.Mapper == nil { var err error - opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) if err != nil { - log.WithName("setup").Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config") + logger.Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config: %w", err) } } // Default the resync period to 10 hours if unset - if opts.Resync == nil { - opts.Resync = &defaultResyncTime + if opts.SyncPeriod == nil { + opts.SyncPeriod = &defaultSyncPeriod } return opts, nil } -func convertToByGVK[T any](byObject map[client.Object]T, def T, scheme *runtime.Scheme) (map[schema.GroupVersionKind]T, error) { - byGVK := map[schema.GroupVersionKind]T{} - for object, value := range byObject { +func convertToInformerOptsByGVK(in map[client.Object]ByObject, scheme *runtime.Scheme) (map[schema.GroupVersionKind]internal.InformersOptsByGVK, error) { + out := map[schema.GroupVersionKind]internal.InformersOptsByGVK{} + for object, byObject := range in { gvk, err := apiutil.GVKForObject(object, scheme) if err != nil { return nil, err } - byGVK[gvk] = value - } - byGVK[schema.GroupVersionKind{}] = def - return byGVK, nil -} - -func convertToByObject[T any](byGVK map[schema.GroupVersionKind]T, scheme *runtime.Scheme) (map[client.Object]T, T, error) { - var byObject map[client.Object]T - def := byGVK[schema.GroupVersionKind{}] - for gvk, value := range byGVK { - if gvk == (schema.GroupVersionKind{}) { 
- continue + if _, ok := out[gvk]; ok { + return nil, fmt.Errorf("duplicate cache options for GVK %v, cache.Options.ByObject has multiple types with the same GroupVersionKind", gvk) } - obj, err := scheme.New(gvk) - if err != nil { - return nil, def, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, def, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) - } - cObj.GetObjectKind().SetGroupVersionKind(gvk) - if byObject == nil { - byObject = map[client.Object]T{} - } - byObject[cObj] = value - } - return byObject, def, nil -} - -// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. -type DisableDeepCopyByObject map[client.Object]bool - -var _ client.Object = &ObjectAll{} - -// ObjectAll is the argument to represent all objects' types. -type ObjectAll struct { - client.Object -} - -func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { - disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} - for obj, disable := range disableDeepCopyByObject { - switch obj.(type) { - case ObjectAll, *ObjectAll: - disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable - default: - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return nil, err - } - disableDeepCopyByGVK[gvk] = disable + out[gvk] = internal.InformersOptsByGVK{ + Selector: internal.Selector{ + Field: byObject.Field, + Label: byObject.Label, + }, + Transform: byObject.Transform, + UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy, } } - return disableDeepCopyByGVK, nil + return out, nil } - -func convertToDisableDeepCopyByObject(byGVK internal.DisableDeepCopyByGVK, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { - var byObject DisableDeepCopyByObject - for gvk, value := range byGVK { - if byObject == nil { - byObject = DisableDeepCopyByObject{} - } - if gvk == (schema.GroupVersionKind{}) { - byObject[ObjectAll{}] = value - continue - } - obj, err := scheme.New(gvk) - if err != nil { - return nil, err - } - cObj, ok := obj.(client.Object) - if !ok { - return nil, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) - } - - byObject[cObj] = value - } - return byObject, nil -} - -// TransformByObject associate a client.Object's GVK to a transformer function -// to be applied when storing the object into the cache. -type TransformByObject map[client.Object]toolscache.TransformFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 08e4e6df5..771244d52 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -19,10 +19,10 @@ package cache import ( "context" "fmt" - "reflect" "strings" apimeta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -45,19 +45,21 @@ func (*ErrCacheNotStarted) Error() string { return "the cache is not started, can not read objects" } -// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +// informerCache is a Kubernetes Object cache populated from internal.Informers. +// informerCache wraps internal.Informers. 
type informerCache struct { - *internal.InformersMap + scheme *runtime.Scheme + *internal.Informers } // Get implements Reader. -func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { - gvk, err := apiutil.GVKForObject(out, ip.Scheme) +func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ic.scheme) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + started, cache, err := ic.Informers.Get(ctx, gvk, out) if err != nil { return err } @@ -69,13 +71,13 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie } // List implements Reader. -func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { - gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) +func (ic *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ic.objectTypeForListObject(out) if err != nil { return err } - started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + started, cache, err := ic.Informers.Get(ctx, *gvk, cacheTypeObj) if err != nil { return err } @@ -90,54 +92,46 @@ func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts . // objectTypeForListObject tries to find the runtime.Object and associated GVK // for a single object corresponding to the passed-in list type. We need them // because they are used as cache map key. -func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { - gvk, err := apiutil.GVKForObject(list, ip.Scheme) +func (ic *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ic.scheme) if err != nil { return nil, nil, err } - // we need the non-list GVK, so chop off the "List" from the end of the kind - if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { - gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] - } + // We need the non-list GVK, so chop off the "List" from the end of the kind. + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - _, isUnstructured := list.(*unstructured.UnstructuredList) - var cacheTypeObj runtime.Object - if isUnstructured { + // Handle unstructured.UnstructuredList. + if _, isUnstructured := list.(runtime.Unstructured); isUnstructured { u := &unstructured.Unstructured{} u.SetGroupVersionKind(gvk) - cacheTypeObj = u - } else { - itemsPtr, err := apimeta.GetItemsPtr(list) - if err != nil { - return nil, nil, err - } - // http://knowyourmeme.com/memes/this-is-fine - elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() - if elemType.Kind() != reflect.Ptr { - elemType = reflect.PtrTo(elemType) - } - - cacheTypeValue := reflect.Zero(elemType) - var ok bool - cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) - if !ok { - return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) - } + return &gvk, u, nil + } + // Handle metav1.PartialObjectMetadataList. 
+ if _, isPartialObjectMetadata := list.(*metav1.PartialObjectMetadataList); isPartialObjectMetadata { + pom := &metav1.PartialObjectMetadata{} + pom.SetGroupVersionKind(gvk) + return &gvk, pom, nil } + // Any other list type should have a corresponding non-list type registered + // in the scheme. Use that to create a new instance of the non-list type. + cacheTypeObj, err := ic.scheme.New(gvk) + if err != nil { + return nil, nil, err + } return &gvk, cacheTypeObj, nil } // GetInformerForKind returns the informer for the GroupVersionKind. -func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { +func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object - obj, err := ip.Scheme.New(gvk) + obj, err := ic.scheme.New(gvk) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -145,13 +139,13 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou } // GetInformer returns the informer for the obj. -func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { - gvk, err := apiutil.GVKForObject(obj, ip.Scheme) +func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ic.scheme) if err != nil { return nil, err } - _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + _, i, err := ic.Informers.Get(ctx, gvk, obj) if err != nil { return nil, err } @@ -160,7 +154,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (In // NeedLeaderElection implements the LeaderElectionRunnable interface // to indicate that this can be started without requiring the leader lock. -func (ip *informerCache) NeedLeaderElection() bool { +func (ic *informerCache) NeedLeaderElection() bool { return false } @@ -169,8 +163,8 @@ func (ip *informerCache) NeedLeaderElection() bool { // to List. For one-to-one compatibility with "normal" field selectors, only return one value. // The values may be anything. They will automatically be prefixed with the namespace of the // given object, if present. The objects passed are guaranteed to be objects of the correct type. -func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - informer, err := ip.GetInformer(ctx, obj) +func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ic.GetInformer(ctx, obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index f78b08338..3c8355bbd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" - "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" ) // CacheReader is a client.Reader. 
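For downstream code that constructs its own cache, the reworked cache.Options shown in the cache.go hunk above can be exercised roughly as follows. This is a minimal sketch against the field names visible in this patch (Namespaces, SyncPeriod, DefaultLabelSelector, ByObject); the namespace names, selector values and node name are illustrative assumptions, not taken from this repository.

package main

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	cfg := ctrl.GetConfigOrDie()

	// Matches the defaultSyncPeriod introduced above; set explicitly only if needed.
	syncPeriod := 10 * time.Hour

	c, err := cache.New(cfg, cache.Options{
		// Namespaces replaces the former single Namespace string; more than one
		// entry makes New() return a multi-namespace cache.
		Namespaces: []string{"namespace-a", "namespace-b"},
		SyncPeriod: &syncPeriod,
		// Applied to every informer unless overridden per object in ByObject.
		DefaultLabelSelector: labels.SelectorFromSet(labels.Set{"app": "example"}),
		// Per-object selectors replace the removed SelectorsByObject map.
		ByObject: map[client.Object]cache.ByObject{
			&corev1.Pod{}: {
				Field: fields.OneTermEqualSelector("spec.nodeName", "node-1"),
			},
		},
	})
	if err != nil {
		panic(err)
	}
	_ = c // typically handed to a manager and started via its Start(ctx) method
}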
@@ -147,7 +147,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli } obj, isObj := item.(runtime.Object) if !isObj { - return fmt.Errorf("cache contained %T, which is not an Object", obj) + return fmt.Errorf("cache contained %T, which is not an Object", item) } meta, err := apimeta.Accessor(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go deleted file mode 100644 index 27f46e327..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type InformersMap struct { - // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps - // TODO(directxman12): genericize this over different projections now that we have 3 different maps - - structured *specificInformersMap - unstructured *specificInformersMap - metadata *specificInformersMap - - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme -} - -// NewInformersMap creates a new InformersMap that can create informers for -// both structured and unstructured objects. -func NewInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, -) *InformersMap { - return &InformersMap{ - structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), - - Scheme: scheme, - } -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. -func (m *InformersMap) Start(ctx context.Context) error { - go m.structured.Start(ctx) - go m.unstructured.Start(ctx) - go m.metadata.Start(ctx) - <-ctx.Done() - return nil -} - -// WaitForCacheSync waits until all the caches have been started and synced. -func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { - syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) 
- syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) - syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) - - if !m.structured.waitForStarted(ctx) { - return false - } - if !m.unstructured.waitForStarted(ctx) { - return false - } - if !m.metadata.waitForStarted(ctx) { - return false - } - return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) -} - -// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns -// the Informer from the map. -func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - switch obj.(type) { - case *unstructured.Unstructured: - return m.unstructured.Get(ctx, gvk, obj) - case *unstructured.UnstructuredList: - return m.unstructured.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadata: - return m.metadata.Get(ctx, gvk, obj) - case *metav1.PartialObjectMetadataList: - return m.metadata.Get(ctx, gvk, obj) - default: - return m.structured.Get(ctx, gvk, obj) - } -} - -// newStructuredInformersMap creates a new InformersMap for structured objects. -func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch) -} - -// newUnstructuredInformersMap creates a new InformersMap for unstructured objects. -func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch) -} - -// newMetadataInformersMap creates a new InformersMap for metadata-only objects. -func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, - namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap { - return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go deleted file mode 100644 index 54bd7eec9..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package internal - -import "k8s.io/apimachinery/pkg/runtime/schema" - -// GroupVersionKindAll is the argument to represent all GroupVersionKind types. -var GroupVersionKindAll = schema.GroupVersionKind{} - -// DisableDeepCopyByGVK associate a GroupVersionKind to disable DeepCopy during get or list from cache. -type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool - -// IsDisabled returns whether a GroupVersionKind is disabled DeepCopy. -func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { - if d, ok := disableByGVK[gvk]; ok { - return d - } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { - return d - } - return false -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go new file mode 100644 index 000000000..09e011111 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go @@ -0,0 +1,560 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// InformersOpts configures an InformerMap. +type InformersOpts struct { + HTTPClient *http.Client + Scheme *runtime.Scheme + Mapper meta.RESTMapper + ResyncPeriod time.Duration + Namespace string + ByGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +// InformersOptsByGVK configured additional by group version kind (or object) +// in an InformerMap. +type InformersOptsByGVK struct { + Selector Selector + Transform cache.TransformFunc + UnsafeDisableDeepCopy *bool +} + +// NewInformers creates a new InformersMap that can create informers under the hood. +func NewInformers(config *rest.Config, options *InformersOpts) *Informers { + return &Informers{ + config: config, + httpClient: options.HTTPClient, + scheme: options.Scheme, + mapper: options.Mapper, + tracker: tracker{ + Structured: make(map[schema.GroupVersionKind]*Cache), + Unstructured: make(map[schema.GroupVersionKind]*Cache), + Metadata: make(map[schema.GroupVersionKind]*Cache), + }, + codecs: serializer.NewCodecFactory(options.Scheme), + paramCodec: runtime.NewParameterCodec(options.Scheme), + resync: options.ResyncPeriod, + startWait: make(chan struct{}), + namespace: options.Namespace, + byGVK: options.ByGVK, + } +} + +// Cache contains the cached data for an Cache. 
+type Cache struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +type tracker struct { + Structured map[schema.GroupVersionKind]*Cache + Unstructured map[schema.GroupVersionKind]*Cache + Metadata map[schema.GroupVersionKind]*Cache +} + +// Informers create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type Informers struct { + // httpClient is used to create a new REST client + httpClient *http.Client + + // scheme maps runtime.Objects to GroupVersionKinds + scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // tracker tracks informers keyed by their type and groupVersionKind + tracker tracker + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // started is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. + startWait chan struct{} + + // waitGroup is the wait group that is used to wait for all informers to stop + waitGroup sync.WaitGroup + + // stopped is true if the informers have been stopped + stopped bool + + // ctx is the context to stop informers + ctx context.Context + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + byGVK map[schema.GroupVersionKind]InformersOptsByGVK +} + +func (ip *Informers) getSelector(gvk schema.GroupVersionKind) Selector { + if ip.byGVK == nil { + return Selector{} + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Selector + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Selector + } + return Selector{} +} + +func (ip *Informers) getTransform(gvk schema.GroupVersionKind) cache.TransformFunc { + if ip.byGVK == nil { + return nil + } + if res, ok := ip.byGVK[gvk]; ok { + return res.Transform + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok { + return res.Transform + } + return nil +} + +func (ip *Informers) getDisableDeepCopy(gvk schema.GroupVersionKind) bool { + if ip.byGVK == nil { + return false + } + if res, ok := ip.byGVK[gvk]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + if res, ok := ip.byGVK[schema.GroupVersionKind{}]; ok && res.UnsafeDisableDeepCopy != nil { + return *res.UnsafeDisableDeepCopy + } + return false +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. 
+func (ip *Informers) Start(ctx context.Context) error { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the context so it can be passed to informers that are added later + ip.ctx = ctx + + // Start each informer + for _, i := range ip.tracker.Structured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Unstructured { + ip.startInformerLocked(i.Informer) + } + for _, i := range ip.tracker.Metadata { + ip.startInformerLocked(i.Informer) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() // Block until the context is done + ip.mu.Lock() + ip.stopped = true // Set stopped to true so we don't start any new informers + ip.mu.Unlock() + ip.waitGroup.Wait() // Block until all informers have stopped + return nil +} + +func (ip *Informers) startInformerLocked(informer cache.SharedIndexInformer) { + // Don't start the informer in case we are already waiting for the items in + // the waitGroup to finish, since waitGroups don't support waiting and adding + // at the same time. + if ip.stopped { + return + } + + ip.waitGroup.Add(1) + go func() { + defer ip.waitGroup.Done() + informer.Run(ip.ctx.Done()) + }() +} + +func (ip *Informers) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// getHasSyncedFuncs returns all the HasSynced functions for the informers in this map. +func (ip *Informers) getHasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + + res := make([]cache.InformerSynced, 0, + len(ip.tracker.Structured)+len(ip.tracker.Unstructured)+len(ip.tracker.Metadata), + ) + for _, i := range ip.tracker.Structured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Unstructured { + res = append(res, i.Informer.HasSynced) + } + for _, i := range ip.tracker.Metadata { + res = append(res, i.Informer.HasSynced) + } + return res +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (ip *Informers) WaitForCacheSync(ctx context.Context) bool { + if !ip.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), ip.getHasSyncedFuncs()...) +} + +func (ip *Informers) get(gvk schema.GroupVersionKind, obj runtime.Object) (res *Cache, started bool, ok bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByType(obj)[gvk] + return i, ip.started, ok +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. +func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *Cache, error) { + // Return the informer if it is found + i, started, ok := ip.get(gvk, obj) + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. 
+ if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersionKind]*Cache { + switch obj.(type) { + case runtime.Unstructured: + return ip.tracker.Unstructured + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + return ip.tracker.Metadata + default: + return ip.tracker.Structured + } +} + +func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByType(obj)[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. + listWatcher, err := ip.makeListWatcher(gvk, obj) + if err != nil { + return nil, false, err + } + sharedIndexInformer := cache.NewSharedIndexInformer(&cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.getSelector(gvk).ApplyToList(&opts) + return listWatcher.ListFunc(opts) + }, + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.getSelector(gvk).ApplyToList(&opts) + opts.Watch = true // Watch needs to be set to true separately + return listWatcher.WatchFunc(opts) + }, + }, obj, calculateResyncPeriod(ip.resync), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := sharedIndexInformer.SetTransform(ip.getTransform(gvk)); err != nil { + return nil, false, err + } + + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + // Create the new entry and set it in the map. + i := &Cache{ + Informer: sharedIndexInformer, + Reader: CacheReader{ + indexer: sharedIndexInformer.GetIndexer(), + groupVersionKind: gvk, + scopeName: mapping.Scope.Name(), + disableDeepCopy: ip.getDisableDeepCopy(gvk), + }, + } + ip.informersByType(obj)[gvk] = i + + // Start the informer in case the InformersMap has started, otherwise it will be + // started when the InformersMap starts. + if ip.started { + ip.startInformerLocked(i.Informer) + } + return i, ip.started, nil +} + +func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Object) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Figure out if the GVK we're dealing with is global, or namespace scoped. + var namespace string + if mapping.Scope.Name() == meta.RESTScopeNameNamespace { + namespace = restrictNamespaceBySelector(ip.namespace, ip.getSelector(gvk)) + } + + switch obj.(type) { + // + // Unstructured + // + case runtime.Unstructured: + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. 
+ cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := dynamicClient.Resource(mapping.Resource) + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + if namespace != "" { + return resources.Namespace(namespace).List(ip.ctx, opts) + } + return resources.List(ip.ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + if namespace != "" { + return resources.Namespace(namespace).Watch(ip.ctx, opts) + } + return resources.Watch(ip.ctx, opts) + }, + }, nil + // + // Metadata + // + case *metav1.PartialObjectMetadata, *metav1.PartialObjectMetadataList: + // Always clear the negotiated serializer and use the one + // set from the metadata client. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // Grab the metadata metadataClient. + metadataClient, err := metadata.NewForConfigAndClient(cfg, ip.httpClient) + if err != nil { + return nil, err + } + resources := metadataClient.Resource(mapping.Resource) + + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + var ( + list *metav1.PartialObjectMetadataList + err error + ) + if namespace != "" { + list, err = resources.Namespace(namespace).List(ip.ctx, opts) + } else { + list, err = resources.List(ip.ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watcher watch.Interface, err error) { + if namespace != "" { + watcher, err = resources.Namespace(namespace).Watch(ip.ctx, opts) + } else { + watcher, err = resources.Watch(ip.ctx, opts) + } + if err != nil { + return nil, err + } + return newGVKFixupWatcher(gvk, watcher), nil + }, + }, nil + // + // Structured. + // + default: + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs, ip.httpClient) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.scheme.New(listGVK) + if err != nil { + return nil, err + } + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + + // Create the resulting object, and execute the request. + res := listObj.DeepCopyObject() + if err := req.Do(ip.ctx).Into(res); err != nil { + return nil, err + } + return res, nil + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + // Build the request. + req := client.Get().Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec) + if namespace != "" { + req.Namespace(namespace) + } + // Call the watch. + return req.Watch(ip.ctx) + }, + }, nil + } +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. 
+// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// calculateResyncPeriod returns a duration based on the desired input +// this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. +func calculateResyncPeriod(resync time.Duration) time.Duration { + // the factor will fall into [0.9, 1.1) + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec + return time.Duration(float64(resync.Nanoseconds()) * factor) +} + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go deleted file mode 100644 index 1524d2316..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internal - -import ( - "context" - "fmt" - "math/rand" - "sync" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/metadata" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// clientListWatcherFunc knows how to create a ListWatcher. 
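calculateResyncPeriod replaces the old resyncPeriod closure but keeps the same jitter: the factor lies in [0.9, 1.1), so a 10 hour base resync always lands in [9h, 11h). A small self-contained sketch of the same formula (the jittered helper name is made up for illustration):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jittered applies the same factor range as calculateResyncPeriod, so that
// informers do not resync, and therefore list, in lock-step.
func jittered(resync time.Duration) time.Duration {
	factor := rand.Float64()/5.0 + 0.9 // 0.9 <= factor < 1.1
	return time.Duration(float64(resync.Nanoseconds()) * factor)
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jittered(10 * time.Hour)) // always within [9h, 11h)
	}
}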
-type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) - -// newSpecificInformersMap returns a new specificInformersMap (like -// the generical InformersMap, except that it doesn't implement WaitForCacheSync). -func newSpecificInformersMap(config *rest.Config, - scheme *runtime.Scheme, - mapper meta.RESTMapper, - resync time.Duration, - namespace string, - selectors SelectorsByGVK, - disableDeepCopy DisableDeepCopyByGVK, - transformers TransformFuncByObject, - createListWatcher createListWatcherFunc, -) *specificInformersMap { - ip := &specificInformersMap{ - config: config, - Scheme: scheme, - mapper: mapper, - informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), - codecs: serializer.NewCodecFactory(scheme), - paramCodec: runtime.NewParameterCodec(scheme), - resync: resync, - startWait: make(chan struct{}), - createListWatcher: createListWatcher, - namespace: namespace, - selectors: selectors.forGVK, - disableDeepCopy: disableDeepCopy, - transformers: transformers, - } - return ip -} - -// MapEntry contains the cached data for an Informer. -type MapEntry struct { - // Informer is the cached informer - Informer cache.SharedIndexInformer - - // CacheReader wraps Informer and implements the CacheReader interface for a single type - Reader CacheReader -} - -// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. -// It uses a standard parameter codec constructed based on the given generated Scheme. -type specificInformersMap struct { - // Scheme maps runtime.Objects to GroupVersionKinds - Scheme *runtime.Scheme - - // config is used to talk to the apiserver - config *rest.Config - - // mapper maps GroupVersionKinds to Resources - mapper meta.RESTMapper - - // informersByGVK is the cache of informers keyed by groupVersionKind - informersByGVK map[schema.GroupVersionKind]*MapEntry - - // codecs is used to create a new REST client - codecs serializer.CodecFactory - - // paramCodec is used by list and watch - paramCodec runtime.ParameterCodec - - // stop is the stop channel to stop informers - stop <-chan struct{} - - // resync is the base frequency the informers are resynced - // a 10 percent jitter will be added to the resync period between informers - // so that all informers will not send list requests simultaneously. - resync time.Duration - - // mu guards access to the map - mu sync.RWMutex - - // start is true if the informers have been started - started bool - - // startWait is a channel that is closed after the - // informer has been started. - startWait chan struct{} - - // createClient knows how to create a client and a list object, - // and allows for abstracting over the particulars of structured vs - // unstructured objects. - createListWatcher createListWatcherFunc - - // namespace is the namespace that all ListWatches are restricted to - // default or empty string means all namespaces - namespace string - - // selectors are the label or field selectors that will be added to the - // ListWatch ListOptions. - selectors func(gvk schema.GroupVersionKind) Selector - - // disableDeepCopy indicates not to deep copy objects during get or list objects. - disableDeepCopy DisableDeepCopyByGVK - - // transform funcs are applied to objects before they are committed to the cache - transformers TransformFuncByObject -} - -// Start calls Run on each of the informers and sets started to true. Blocks on the context. 
-// It doesn't return start because it can't return an error, and it's not a runnable directly. -func (ip *specificInformersMap) Start(ctx context.Context) { - func() { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Set the stop channel so it can be passed to informers that are added later - ip.stop = ctx.Done() - - // Start each informer - for _, informer := range ip.informersByGVK { - go informer.Informer.Run(ctx.Done()) - } - - // Set started to true so we immediately start any informers added later. - ip.started = true - close(ip.startWait) - }() - <-ctx.Done() -} - -func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { - select { - case <-ip.startWait: - return true - case <-ctx.Done(): - return false - } -} - -// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. -func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { - ip.mu.RLock() - defer ip.mu.RUnlock() - syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) - for _, informer := range ip.informersByGVK { - syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) - } - return syncedFuncs -} - -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns -// the Informer from the map. -func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - // Return the informer if it is found - i, started, ok := func() (*MapEntry, bool, bool) { - ip.mu.RLock() - defer ip.mu.RUnlock() - i, ok := ip.informersByGVK[gvk] - return i, ip.started, ok - }() - - if !ok { - var err error - if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { - return started, nil, err - } - } - - if started && !i.Informer.HasSynced() { - // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. - if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { - return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) - } - } - - return started, i, nil -} - -func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { - ip.mu.Lock() - defer ip.mu.Unlock() - - // Check the cache to see if we already have an Informer. If we do, return the Informer. - // This is for the case where 2 routines tried to get the informer when it wasn't in the map - // so neither returned early, but the first one created it. - if i, ok := ip.informersByGVK[gvk]; ok { - return i, ip.started, nil - } - - // Create a NewSharedIndexInformer and add it to the map. 
- var lw *cache.ListWatch - lw, err := ip.createListWatcher(gvk, ip) - if err != nil { - return nil, false, err - } - ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ - cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, - }) - - // Check to see if there is a transformer for this gvk - if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { - return nil, false, err - } - - rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, false, err - } - - i := &MapEntry{ - Informer: ni, - Reader: CacheReader{ - indexer: ni.GetIndexer(), - groupVersionKind: gvk, - scopeName: rm.Scope.Name(), - disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), - }, - } - ip.informersByGVK[gvk] = i - - // Start the Informer if need by - // TODO(seans): write thorough tests and document what happens here - can you add indexers? - // can you add eventhandlers? - if ip.started { - go i.Informer.Run(ip.stop) - } - return i, ip.started, nil -} - -// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. -func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) - if err != nil { - return nil, err - } - listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") - listObj, err := ip.Scheme.New(listGVK) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - res := listObj.DeepCopyObject() - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) - return res, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) - }, - }, nil -} - -func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // If the rest configuration has a negotiated serializer passed in, - // we should remove it and use the one that the dynamic client sets for us. 
- cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - dynamicClient, err := dynamic.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. - ctx := context.TODO() - // Create a new ListWatch for the obj - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).List(ctx, opts) - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } - return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) - }, - }, nil -} - -func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { - // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the - // groupVersionKind to the Resource API we will use. - mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } - - // Always clear the negotiated serializer and use the one - // set from the metadata client. - cfg := rest.CopyConfig(ip.config) - cfg.NegotiatedSerializer = nil - - // grab the metadata client - client, err := metadata.NewForConfig(cfg) - if err != nil { - return nil, err - } - - // TODO: the functions that make use of this ListWatch should be adapted to - // pass in their own contexts instead of relying on this fixed one here. 
- ctx := context.TODO() - - // create the relevant listwatch - return &cache.ListWatch{ - ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { - ip.selectors(gvk).ApplyToList(&opts) - - var ( - list *metav1.PartialObjectMetadataList - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) - } else { - list, err = client.Resource(mapping.Resource).List(ctx, opts) - } - if list != nil { - for i := range list.Items { - list.Items[i].SetGroupVersionKind(gvk) - } - } - return list, err - }, - // Setup the watch function - WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { - ip.selectors(gvk).ApplyToList(&opts) - // Watch needs to be set to true separately - opts.Watch = true - - var ( - watcher watch.Interface - err error - ) - namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) - if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) - } else { - watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) - } - if watcher != nil { - watcher = newGVKFixupWatcher(gvk, watcher) - } - return watcher, err - }, - }, nil -} - -// newGVKFixupWatcher adds a wrapper that preserves the GVK information when -// events come in. -// -// This works around a bug where GVK information is not passed into mapping -// functions when using the OnlyMetadata option in the builder. -// This issue is most likely caused by kubernetes/kubernetes#80609. -// See kubernetes-sigs/controller-runtime#1484. -// -// This was originally implemented as a cache.ResourceEventHandler wrapper but -// that contained a data race which was resolved by setting the GVK in a watch -// wrapper, before the objects are written to the cache. -// See kubernetes-sigs/controller-runtime#1650. -// -// The original watch wrapper was found to be incompatible with -// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a -// watch.Filter which is compatible. -// See kubernetes-sigs/controller-runtime#1789. -func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { - return watch.Filter( - watcher, - func(in watch.Event) (watch.Event, bool) { - in.Object.GetObjectKind().SetGroupVersionKind(gvk) - return in, true - }, - ) -} - -// resyncPeriod returns a function which generates a duration each time it is -// invoked; this is so that multiple controllers don't get into lock-step and all -// hammer the apiserver with list requests simultaneously. -func resyncPeriod(resync time.Duration) func() time.Duration { - return func() time.Duration { - // the factor will fall into [0.9, 1.1) - factor := rand.Float64()/5.0 + 0.9 //nolint:gosec - return time.Duration(float64(resync.Nanoseconds()) * factor) - } -} - -// restrictNamespaceBySelector returns either a global restriction for all ListWatches -// if not default/empty, or the namespace that a ListWatch for the specific resource -// is restricted to, based on a specified field selector for metadata.namespace field. 
-func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { - if namespaceOpt != "" { - // namespace is already restricted - return namespaceOpt - } - fieldSelector := s.Field - if fieldSelector == nil || fieldSelector.Empty() { - return "" - } - // check whether a selector includes the namespace field - value, found := fieldSelector.RequiresExactMatch("metadata.namespace") - if found { - return value - } - return "" -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go index 4eff32fb3..c674379b9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -20,23 +20,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" ) -// SelectorsByGVK associate a GroupVersionKind to a field/label selector. -type SelectorsByGVK map[schema.GroupVersionKind]Selector - -func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector { - if specific, found := s[gvk]; found { - return specific - } - if defaultSelector, found := s[schema.GroupVersionKind{}]; found { - return defaultSelector - } - - return Selector{} -} - // Selector specify the label/field selector to fill in ListOptions. type Selector struct { Label labels.Selector diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go index f69e02262..0725f550c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go @@ -8,9 +8,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// TransformFuncByObject provides access to the correct transform function for +// TransformFuncByGVK provides access to the correct transform function for // any given GVK. -type TransformFuncByObject interface { +type TransformFuncByGVK interface { Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error Get(schema.GroupVersionKind) cache.TransformFunc SetDefault(transformer cache.TransformFunc) @@ -21,9 +21,9 @@ type transformFuncByGVK struct { transformers map[schema.GroupVersionKind]cache.TransformFunc } -// TransformFuncByObjectFromMap creates a TransformFuncByObject from a map that +// TransformFuncByGVKFromMap creates a TransformFuncByGVK from a map that // maps GVKs to TransformFuncs. 
-func TransformFuncByObjectFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByObject { +func TransformFuncByGVKFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByGVK { byGVK := &transformFuncByGVK{} if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { byGVK.defaultTransform = defaultFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index fccb36471..ac97beae9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -28,12 +28,9 @@ import ( "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config. -type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) - // a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" @@ -43,31 +40,43 @@ const globalCache = "_cluster-scope" // a global cache for cluster scoped resource. Note that this is not intended // to be used for excluding namespaces, this is better done via a Predicate. Also note that // you may face performance issues when using this with a high number of namespaces. +// +// Deprecated: Use cache.Options.Namespaces instead. func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { return func(config *rest.Config, opts Options) (Cache, error) { - opts, err := defaultOpts(config, opts) + opts.Namespaces = namespaces + return newMultiNamespaceCache(config, opts) + } +} + +func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { + if len(opts.Namespaces) < 2 { + return nil, fmt.Errorf("must specify more than one namespace to use multi-namespace cache") + } + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + + // Create every namespace cache. + caches := map[string]Cache{} + for _, ns := range opts.Namespaces { + opts.Namespaces = []string{ns} + c, err := New(config, opts) if err != nil { return nil, err } - - caches := map[string]Cache{} - - // create a cache for cluster scoped resources - gCache, err := New(config, opts) - if err != nil { - return nil, fmt.Errorf("error creating global cache: %w", err) - } - - for _, ns := range namespaces { - opts.Namespace = ns - c, err := New(config, opts) - if err != nil { - return nil, err - } - caches[ns] = c - } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + caches[ns] = c } + + // Create a cache for cluster scoped resources. + opts.Namespaces = []string{} + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) + } + + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil } // multiNamespaceCache knows how to handle multiple namespaced caches @@ -89,7 +98,7 @@ func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. 
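With MultiNamespacedCacheBuilder deprecated, callers are expected to set cache.Options.Namespaces and call cache.New directly. A hedged sketch of the replacement call, inside a hypothetical setup function (the namespace names and the cfg variable are placeholders, not taken from this repository):

	// Deprecated style:
	//   c, err := cache.MultiNamespacedCacheBuilder([]string{"ceph-csi", "rook-ceph"})(cfg, cache.Options{})
	// Replacement with this controller-runtime version:
	c, err := cache.New(cfg, cache.Options{
		Namespaces: []string{"ceph-csi", "rook-ceph"},
	})
	if err != nil {
		return err
	}
	_ = c // hand the cache to the manager / cluster as usual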
- isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return nil, err } @@ -119,7 +128,7 @@ func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema // If the object is clusterscoped, get the informer from clusterCache, // if not use the namespaced caches. - isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) if err != nil { return nil, err } @@ -183,9 +192,9 @@ func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { } func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { - return nil //nolint:nilerr + return err } if !isNamespaced { @@ -201,7 +210,7 @@ func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, } func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { - isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -223,7 +232,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + isNamespaced, err := apiutil.IsObjectNamespaced(list, c.Scheme, c.RESTMapper) if err != nil { return err } @@ -293,42 +302,63 @@ type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } +type handlerRegistration struct { + handles map[string]toolscache.ResourceEventHandlerRegistration +} + +type syncer interface { + HasSynced() bool +} + +// HasSynced asserts that the handler has been called for the full initial state of the informer. +// This uses syncer to be compatible between client-go 1.27+ and older versions when the interface changed. +func (h handlerRegistration) HasSynced() bool { + for _, reg := range h.handles { + if s, ok := reg.(syncer); ok { + if !s.HasSynced() { + return false + } + } + } + return true +} + var _ Informer = &multiNamespaceInformer{} // AddEventHandler adds the handler to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandler(handler) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. 
func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { - handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) if err != nil { return nil, err } - handles[ns] = registration + handles.handles[ns] = registration } return handles, nil } // RemoveEventHandler removes a formerly added event handler given by its registration handle. func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { - handles, ok := h.(map[string]toolscache.ResourceEventHandlerRegistration) + handles, ok := h.(handlerRegistration) if !ok { return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") } for ns, informer := range i.namespaceToInformer { - registration, ok := handles[ns] + registration, ok := handles.handles[ns] if !ok { continue } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go index 1030013db..2b9b60d8d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go @@ -19,9 +19,14 @@ package certwatcher import ( "context" "crypto/tls" + "fmt" "sync" + "time" "github.com/fsnotify/fsnotify" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/certwatcher/metrics" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" ) @@ -39,6 +44,9 @@ type CertWatcher struct { certPath string keyPath string + + // callback is a function to be invoked when the certificate changes. + callback func(tls.Certificate) } // New returns a new CertWatcher watching the given certificate and key. @@ -63,6 +71,17 @@ func New(certPath, keyPath string) (*CertWatcher, error) { return cw, nil } +// RegisterCallback registers a callback to be invoked when the certificate changes. +func (cw *CertWatcher) RegisterCallback(callback func(tls.Certificate)) { + cw.Lock() + defer cw.Unlock() + // If the current certificate is not nil, invoke the callback immediately. + if cw.currentCert != nil { + callback(*cw.currentCert) + } + cw.callback = callback +} + // GetCertificate fetches the currently loaded certificate, which may be nil. func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { cw.RLock() @@ -72,11 +91,22 @@ func (cw *CertWatcher) GetCertificate(_ *tls.ClientHelloInfo) (*tls.Certificate, // Start starts the watch on the certificate and key files. func (cw *CertWatcher) Start(ctx context.Context) error { - files := []string{cw.certPath, cw.keyPath} + files := sets.New(cw.certPath, cw.keyPath) - for _, f := range files { - if err := cw.watcher.Add(f); err != nil { - return err + { + var watchErr error + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, func(ctx context.Context) (done bool, err error) { + for _, f := range files.UnsortedList() { + if err := cw.watcher.Add(f); err != nil { + watchErr = err + return false, nil //nolint:nilerr // We want to keep trying. 
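The certwatcher changes add a RegisterCallback hook, invoked once immediately when a certificate is already loaded and again after every reload, and make the fsnotify watch setup retry for up to ten seconds. A usage sketch under assumed inputs (the file paths and logging are placeholders), inside a hypothetical setup function:

	watcher, err := certwatcher.New("/etc/tls/tls.crt", "/etc/tls/tls.key")
	if err != nil {
		return err
	}
	// Called immediately if a certificate is already loaded, then on every rotation.
	watcher.RegisterCallback(func(cert tls.Certificate) {
		log.Println("TLS certificate rotated")
		_ = cert // e.g. push the new certificate into a custom server
	})
	go func() {
		if err := watcher.Start(ctx); err != nil {
			log.Printf("certificate watcher failed: %v", err)
		}
	}()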
+ } + // We've added the watch, remove it from the set. + files.Delete(f) + } + return true, nil + }); err != nil { + return fmt.Errorf("failed to add watches: %w", kerrors.NewAggregate([]error{err, watchErr})) } } @@ -130,6 +160,14 @@ func (cw *CertWatcher) ReadCertificate() error { log.Info("Updated current TLS certificate") + // If a callback is registered, invoke it with the new certificate. + cw.RLock() + defer cw.RUnlock() + if cw.callback != nil { + go func() { + cw.callback(cert) + }() + } return nil } @@ -154,13 +192,13 @@ func (cw *CertWatcher) handleEvent(event fsnotify.Event) { } func isWrite(event fsnotify.Event) bool { - return event.Op&fsnotify.Write == fsnotify.Write + return event.Op.Has(fsnotify.Write) } func isCreate(event fsnotify.Event) bool { - return event.Op&fsnotify.Create == fsnotify.Create + return event.Op.Has(fsnotify.Create) } func isRemove(event fsnotify.Event) bool { - return event.Op&fsnotify.Remove == fsnotify.Remove + return event.Op.Has(fsnotify.Remove) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index 8e2ac48fa..6a1bfb546 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -20,7 +20,9 @@ limitations under the License. package apiutil import ( + "errors" "fmt" + "net/http" "reflect" "sync" @@ -30,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" @@ -59,9 +62,13 @@ func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { // NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery // information fetched by a new client with the given config. -func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { +func NewDiscoveryRESTMapper(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + // Get a mapper - dc, err := discovery.NewDiscoveryClientForConfig(c) + dc, err := discovery.NewDiscoveryClientForConfigAndClient(c, httpClient) if err != nil { return nil, err } @@ -72,6 +79,36 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { return restmapper.NewDiscoveryRESTMapper(gr), nil } +// IsObjectNamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsObjectNamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper meta.RESTMapper) (bool, error) { + gvk, err := GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsGVKNamespaced(gvk, restmapper) +} + +// IsGVKNamespaced returns true if the object having the provided +// GVK is namespace scoped. 
+func IsGVKNamespaced(gvk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != meta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} + // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { // TODO(directxman12): do we want to generalize this to arbitrary container types? @@ -142,21 +179,11 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated // with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from // baseConfig, if set, otherwise a default serializer will be set. -func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) { - return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs)) -} - -// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory -// in order to avoid clearing the GVK from the decoded object. -// -// See https://github.com/kubernetes/kubernetes/issues/80609. -type serializerWithDecodedGVK struct { - serializer.WithoutConversionCodecFactory -} - -// DecoderToVersion returns an decoder that does not do conversion. -func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return serializer +func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } + return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient) } // createRestConfig copies the base config and updates needed fields for a new rest config. @@ -183,9 +210,8 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf } if isUnstructured { - // If the object is unstructured, we need to preserve the GVK information. - // Use our own custom serializer. - cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} + // If the object is unstructured, we use the client-go dynamic serializer. + cfg = dynamic.ConfigFor(cfg) } else { cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go deleted file mode 100644 index 6b9dcf68a..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apiutil - -import ( - "sync" - "sync/atomic" - - "golang.org/x/time/rate" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" -) - -// dynamicRESTMapper is a RESTMapper that dynamically discovers resource -// types at runtime. -type dynamicRESTMapper struct { - mu sync.RWMutex // protects the following fields - staticMapper meta.RESTMapper - limiter *rate.Limiter - newMapper func() (meta.RESTMapper, error) - - lazy bool - // Used for lazy init. - inited uint32 - initMtx sync.Mutex - - useLazyRestmapper bool -} - -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. -type DynamicRESTMapperOption func(*dynamicRESTMapper) error - -// WithLimiter sets the RESTMapper's underlying limiter to lim. -func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.limiter = lim - return nil - } -} - -// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings -// until an API call is made. -var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.lazy = true - return nil -} - -// WithExperimentalLazyMapper enables experimental more advanced Lazy Restmapping mechanism. -var WithExperimentalLazyMapper DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error { - drm.useLazyRestmapper = true - return nil -} - -// WithCustomMapper supports setting a custom RESTMapper refresher instead of -// the default method, which uses a discovery client. -// -// This exists mainly for testing, but can be useful if you need tighter control -// over how discovery is performed, which discovery endpoints are queried, etc. -func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption { - return func(drm *dynamicRESTMapper) error { - drm.newMapper = newMapper - return nil - } -} - -// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic -// RESTMapper dynamically discovers resource types at runtime. opts -// configure the RESTMapper. 
-func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) { - client, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - return nil, err - } - drm := &dynamicRESTMapper{ - limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize), - newMapper: func() (meta.RESTMapper, error) { - groupResources, err := restmapper.GetAPIGroupResources(client) - if err != nil { - return nil, err - } - return restmapper.NewDiscoveryRESTMapper(groupResources), nil - }, - } - for _, opt := range opts { - if err = opt(drm); err != nil { - return nil, err - } - } - if drm.useLazyRestmapper { - return newLazyRESTMapperWithClient(client) - } - if !drm.lazy { - if err := drm.setStaticMapper(); err != nil { - return nil, err - } - } - return drm, nil -} - -var ( - // defaultRefilRate is the default rate at which potential calls are - // added back to the "bucket" of allowed calls. - defaultRefillRate = 5 - // defaultLimitSize is the default starting/max number of potential calls - // per second. Once a call is used, it's added back to the bucket at a rate - // of defaultRefillRate per second. - defaultLimitSize = 5 -) - -// setStaticMapper sets drm's staticMapper by querying its client, regardless -// of reload backoff. -func (drm *dynamicRESTMapper) setStaticMapper() error { - newMapper, err := drm.newMapper() - if err != nil { - return err - } - drm.staticMapper = newMapper - return nil -} - -// init initializes drm only once if drm is lazy. -func (drm *dynamicRESTMapper) init() (err error) { - // skip init if drm is not lazy or has initialized - if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 { - return nil - } - - drm.initMtx.Lock() - defer drm.initMtx.Unlock() - if drm.inited == 0 { - if err = drm.setStaticMapper(); err == nil { - atomic.StoreUint32(&drm.inited, 1) - } - } - return err -} - -// checkAndReload attempts to call the given callback, which is assumed to be dependent -// on the data in the restmapper. -// -// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload -// the RESTMapper's data and re-call the callback once that's occurred. -// If the callback returns any other error, the function will return immediately regardless. -// -// It will take care of ensuring that reloads are rate-limited and that extraneous calls -// aren't made. If a reload would exceed the limiters rate, it returns the error return by -// the callback. -// It's thread-safe, and worries about thread-safety for the callback (so the callback does -// not need to attempt to lock the restmapper). -func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error { - // first, check the common path -- data is fresh enough - // (use an IIFE for the lock's defer) - err := func() error { - drm.mu.RLock() - defer drm.mu.RUnlock() - - return checkNeedsReload() - }() - - needsReload := meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // if the data wasn't fresh, we'll need to try and update it, so grab the lock... - drm.mu.Lock() - defer drm.mu.Unlock() - - // ... and double-check that we didn't reload in the meantime - err = checkNeedsReload() - needsReload = meta.IsNoMatchError(err) - if !needsReload { - return err - } - - // we're still stale, so grab a rate-limit token if we can... 
- if !drm.limiter.Allow() { - // return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter) - // so that client's can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError - return err - } - - // ...reload... - if err := drm.setStaticMapper(); err != nil { - return err - } - - // ...and return the results of the closure regardless - return checkNeedsReload() -} - -// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors. - -func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionKind{}, err - } - var gvk schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvk, err = drm.staticMapper.KindFor(resource) - return err - }) - return gvk, err -} - -func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvks []schema.GroupVersionKind - err := drm.checkAndReload(func() error { - var err error - gvks, err = drm.staticMapper.KindsFor(resource) - return err - }) - return gvks, err -} - -func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return schema.GroupVersionResource{}, err - } - - var gvr schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvr, err = drm.staticMapper.ResourceFor(input) - return err - }) - return gvr, err -} - -func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := drm.init(); err != nil { - return nil, err - } - var gvrs []schema.GroupVersionResource - err := drm.checkAndReload(func() error { - var err error - gvrs, err = drm.staticMapper.ResourcesFor(input) - return err - }) - return gvrs, err -} - -func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mapping *meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mapping, err = drm.staticMapper.RESTMapping(gk, versions...) - return err - }) - return mapping, err -} - -func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - if err := drm.init(); err != nil { - return nil, err - } - var mappings []*meta.RESTMapping - err := drm.checkAndReload(func() error { - var err error - mappings, err = drm.staticMapper.RESTMappings(gk, versions...) 
- return err - }) - return mappings, err -} - -func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) { - if err := drm.init(); err != nil { - return "", err - } - var singular string - err := drm.checkAndReload(func() error { - var err error - singular, err = drm.staticMapper.ResourceSingularizer(resource) - return err - }) - return singular, err -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go similarity index 57% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go index e9b1e710c..e0ff72dc1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/restmapper.go @@ -18,137 +18,151 @@ package apiutil import ( "fmt" + "net/http" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" "k8s.io/client-go/restmapper" ) -// lazyRESTMapper is a RESTMapper that will lazily query the provided -// client for discovery information to do REST mappings. -type lazyRESTMapper struct { - mapper meta.RESTMapper - client *discovery.DiscoveryClient - knownGroups map[string]*restmapper.APIGroupResources - apiGroups []metav1.APIGroup +// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic +// RESTMapper dynamically discovers resource types at runtime. +func NewDynamicRESTMapper(cfg *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { + if httpClient == nil { + return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client") + } - // mutex to provide thread-safe mapper reloading. - mu sync.Mutex -} - -// newLazyRESTMapperWithClient initializes a LazyRESTMapper with a custom discovery client. -func newLazyRESTMapperWithClient(discoveryClient *discovery.DiscoveryClient) (meta.RESTMapper, error) { - return &lazyRESTMapper{ + client, err := discovery.NewDiscoveryClientForConfigAndClient(cfg, httpClient) + if err != nil { + return nil, err + } + return &mapper{ mapper: restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}), - client: discoveryClient, + client: client, knownGroups: map[string]*restmapper.APIGroupResources{}, - apiGroups: []metav1.APIGroup{}, + apiGroups: map[string]*metav1.APIGroup{}, }, nil } -// KindFor implements Mapper.KindFor. -func (m *lazyRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - res, err := m.mapper.KindFor(resource) - if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err - } +// mapper is a RESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +type mapper struct { + mapper meta.RESTMapper + client *discovery.DiscoveryClient + knownGroups map[string]*restmapper.APIGroupResources + apiGroups map[string]*metav1.APIGroup - res, err = m.mapper.KindFor(resource) + // mutex to provide thread-safe mapper reloading. + mu sync.RWMutex +} + +// KindFor implements Mapper.KindFor. 
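// A sketch of the httpClient-aware constructors: NewDiscoveryRESTMapper and the
// lazy NewDynamicRESTMapper now take an explicit *http.Client, which the error
// messages above suggest building with rest.HTTPClientFor. The Pod lookup at the
// end only illustrates the new IsObjectNamespaced helper.
package main

import (
	"log"

	corev1 "k8s.io/api/core/v1"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	cfg := ctrl.GetConfigOrDie()

	// One HTTP client can be shared by every consumer of this rest.Config.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		log.Fatal(err)
	}

	mapper, err := apiutil.NewDynamicRESTMapper(cfg, httpClient)
	if err != nil {
		log.Fatal(err)
	}

	namespaced, err := apiutil.IsObjectNamespaced(&corev1.Pod{}, clientgoscheme.Scheme, mapper)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Pod is namespace scoped: %t", namespaced)
}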
+func (m *mapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + res, err := m.getMapper().KindFor(resource) + if meta.IsNoMatchError(err) { + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return schema.GroupVersionKind{}, err + } + res, err = m.getMapper().KindFor(resource) } return res, err } // KindsFor implements Mapper.KindsFor. -func (m *lazyRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - res, err := m.mapper.KindsFor(resource) +func (m *mapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + res, err := m.getMapper().KindsFor(resource) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil { + return nil, err } - - res, err = m.mapper.KindsFor(resource) + res, err = m.getMapper().KindsFor(resource) } return res, err } // ResourceFor implements Mapper.ResourceFor. -func (m *lazyRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - res, err := m.mapper.ResourceFor(input) +func (m *mapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourceFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return schema.GroupVersionResource{}, err } - - res, err = m.mapper.ResourceFor(input) + res, err = m.getMapper().ResourceFor(input) } return res, err } // ResourcesFor implements Mapper.ResourcesFor. -func (m *lazyRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - res, err := m.mapper.ResourcesFor(input) +func (m *mapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + res, err := m.getMapper().ResourcesFor(input) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil { - return res, err + if err := m.addKnownGroupAndReload(input.Group, input.Version); err != nil { + return nil, err } - - res, err = m.mapper.ResourcesFor(input) + res, err = m.getMapper().ResourcesFor(input) } return res, err } // RESTMapping implements Mapper.RESTMapping. -func (m *lazyRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { - res, err := m.mapper.RESTMapping(gk, versions...) +func (m *mapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMapping(gk, versions...) if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMapping(gk, versions...) + res, err = m.getMapper().RESTMapping(gk, versions...) } return res, err } // RESTMappings implements Mapper.RESTMappings. -func (m *lazyRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { - res, err := m.mapper.RESTMappings(gk, versions...) +func (m *mapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + res, err := m.getMapper().RESTMappings(gk, versions...) 
if meta.IsNoMatchError(err) { - if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil { - return res, err + if err := m.addKnownGroupAndReload(gk.Group, versions...); err != nil { + return nil, err } - - res, err = m.mapper.RESTMappings(gk, versions...) + res, err = m.getMapper().RESTMappings(gk, versions...) } return res, err } // ResourceSingularizer implements Mapper.ResourceSingularizer. -func (m *lazyRESTMapper) ResourceSingularizer(resource string) (string, error) { - return m.mapper.ResourceSingularizer(resource) +func (m *mapper) ResourceSingularizer(resource string) (string, error) { + return m.getMapper().ResourceSingularizer(resource) +} + +func (m *mapper) getMapper() meta.RESTMapper { + m.mu.RLock() + defer m.mu.RUnlock() + return m.mapper } // addKnownGroupAndReload reloads the mapper with updated information about missing API group. // versions can be specified for partial updates, for instance for v1beta1 version only. -func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...string) error { - m.mu.Lock() - defer m.mu.Unlock() +func (m *mapper) addKnownGroupAndReload(groupName string, versions ...string) error { + // versions will here be [""] if the forwarded Version value of + // GroupVersionResource (in calling method) was not specified. + if len(versions) == 1 && versions[0] == "" { + versions = nil + } // If no specific versions are set by user, we will scan all available ones for the API group. // This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls // this data will be taken from cache. if len(versions) == 0 { - apiGroup, err := m.findAPIGroupByNameLocked(groupName) + apiGroup, err := m.findAPIGroupByName(groupName) if err != nil { return err } @@ -157,6 +171,9 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } } + m.mu.Lock() + defer m.mu.Unlock() + // Create or fetch group resources from cache. groupResources := &restmapper.APIGroupResources{ Group: metav1.APIGroup{Name: groupName}, @@ -205,43 +222,53 @@ func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...st } m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources) - return nil } // findAPIGroupByNameLocked returns API group by its name. -func (m *lazyRESTMapper) findAPIGroupByNameLocked(groupName string) (metav1.APIGroup, error) { +func (m *mapper) findAPIGroupByName(groupName string) (*metav1.APIGroup, error) { // Looking in the cache first. - for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // Update the cache if nothing was found. apiGroups, err := m.client.ServerGroups() if err != nil { - return metav1.APIGroup{}, fmt.Errorf("failed to get server groups: %w", err) + return nil, fmt.Errorf("failed to get server groups: %w", err) } if len(apiGroups.Groups) == 0 { - return metav1.APIGroup{}, fmt.Errorf("received an empty API groups list") + return nil, fmt.Errorf("received an empty API groups list") } - m.apiGroups = apiGroups.Groups + m.mu.Lock() + for i := range apiGroups.Groups { + group := &apiGroups.Groups[i] + m.apiGroups[group.Name] = group + } + m.mu.Unlock() // Looking in the cache again. 
- for _, apiGroup := range m.apiGroups { - if groupName == apiGroup.Name { - return apiGroup, nil + { + m.mu.RLock() + group, ok := m.apiGroups[groupName] + m.mu.RUnlock() + if ok { + return group, nil } } // If there is still nothing, return an error. - return metav1.APIGroup{}, fmt.Errorf("failed to find API group %s", groupName) + return nil, fmt.Errorf("failed to find API group %q", groupName) } // fetchGroupVersionResources fetches the resources for the specified group and its versions. -func (m *lazyRESTMapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { +func (m *mapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 7d1ed5c96..21067b6f8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "net/http" "strings" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -36,6 +36,28 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" ) +// Options are creation options for a Client. +type Options struct { + // HTTPClient is the HTTP client to use for requests. + HTTPClient *http.Client + + // Scheme, if provided, will be used to map go structs to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper, if provided, will be used to map GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Cache, if provided, is used to read objects from the cache. + Cache *CacheOptions + + // WarningHandler is used to configure the warning handler responsible for + // surfacing and handling warnings messages sent by the API server. + WarningHandler WarningHandlerOptions + + // DryRun instructs the client to only perform dry run requests. + DryRun *bool +} + // WarningHandlerOptions are options for configuring a // warning handler for the client which is responsible // for surfacing API Server warnings. @@ -50,19 +72,21 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client. -type Options struct { - // Scheme, if provided, will be used to map go structs to GroupVersionKinds - Scheme *runtime.Scheme - - // Mapper, if provided, will be used to map GroupVersionKinds to Resources - Mapper meta.RESTMapper - - // Opts is used to configure the warning handler responsible for - // surfacing and handling warnings messages sent by the API server. - Opts WarningHandlerOptions +// CacheOptions are options for creating a cache-backed client. +type CacheOptions struct { + // Reader is a cache-backed reader that will be used to read objects from the cache. + // +required + Reader Reader + // DisableFor is a list of objects that should not be read from the cache. + DisableFor []Object + // Unstructured is a flag that indicates whether the cache-backed client should + // read unstructured objects or lists from the cache. 
+ Unstructured bool } +// NewClientFunc allows a user to define how to create a client. +type NewClientFunc func(config *rest.Config, options Options) (Client, error) + // New returns a new Client using the provided config and Options. // The returned client reads *and* writes directly from the server // (it doesn't use object caches). It understands how to work with @@ -73,8 +97,12 @@ type Options struct { // corresponding group, version, and kind for the given type. In the // case of unstructured types, the group, version, and kind will be extracted // from the corresponding fields on the object. -func New(config *rest.Config, options Options) (Client, error) { - return newClient(config, options) +func New(config *rest.Config, options Options) (c Client, err error) { + c, err = newClient(config, options) + if err == nil && options.DryRun != nil && *options.DryRun { + c = NewDryRunClient(c) + } + return c, err } func newClient(config *rest.Config, options Options) (*client, error) { @@ -82,7 +110,7 @@ func newClient(config *rest.Config, options Options) (*client, error) { return nil, fmt.Errorf("must provide non-nil rest.Config to client.New") } - if !options.Opts.SuppressWarnings { + if !options.WarningHandler.SuppressWarnings { // surface warnings logger := log.Log.WithName("KubeAPIWarningLogger") // Set a WarningHandler, the default WarningHandler @@ -93,11 +121,20 @@ func newClient(config *rest.Config, options Options) (*client, error) { config.WarningHandler = log.NewKubeAPIWarningLogger( logger, log.KubeAPIWarningLoggerOptions{ - Deduplicate: !options.Opts.AllowDuplicateLogs, + Deduplicate: !options.WarningHandler.AllowDuplicateLogs, }, ) } + // Use the rest HTTP client for the provided config if unset + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + } + // Init a scheme if none provided if options.Scheme == nil { options.Scheme = scheme.Scheme @@ -106,34 +143,35 @@ func newClient(config *rest.Config, options Options) (*client, error) { // Init a Mapper if none provided if options.Mapper == nil { var err error - options.Mapper, err = apiutil.NewDynamicRESTMapper(config) + options.Mapper, err = apiutil.NewDynamicRESTMapper(config, options.HTTPClient) if err != nil { return nil, err } } - clientcache := &clientCache{ - config: config, - scheme: options.Scheme, - mapper: options.Mapper, - codecs: serializer.NewCodecFactory(options.Scheme), + resources := &clientRestResources{ + httpClient: options.HTTPClient, + config: config, + scheme: options.Scheme, + mapper: options.Mapper, + codecs: serializer.NewCodecFactory(options.Scheme), structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } - rawMetaClient, err := metadata.NewForConfig(config) + rawMetaClient, err := metadata.NewForConfigAndClient(config, options.HTTPClient) if err != nil { return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) } c := &client{ typedClient: typedClient{ - cache: clientcache, + resources: resources, paramCodec: runtime.NewParameterCodec(options.Scheme), }, unstructuredClient: unstructuredClient{ - cache: clientcache, + resources: resources, paramCodec: noConversionParamCodec{}, }, metadataClient: metadataClient{ @@ -143,20 +181,65 @@ func newClient(config *rest.Config, options Options) (*client, error) { scheme: options.Scheme, mapper: options.Mapper, } + if options.Cache 
== nil || options.Cache.Reader == nil { + return c, nil + } + // We want a cache if we're here. + // Set the cache. + c.cache = options.Cache.Reader + + // Load uncached GVKs. + c.cacheUnstructured = options.Cache.Unstructured + c.uncachedGVKs = map[schema.GroupVersionKind]struct{}{} + for _, obj := range options.Cache.DisableFor { + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return nil, err + } + c.uncachedGVKs[gvk] = struct{}{} + } return c, nil } var _ Client = &client{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. +// client is a client.Client that reads and writes directly from/to an API server. +// It lazily initializes new clients at the time they are used. type client struct { typedClient typedClient unstructuredClient unstructuredClient metadataClient metadataClient scheme *runtime.Scheme mapper meta.RESTMapper + + cache Reader + uncachedGVKs map[schema.GroupVersionKind]struct{} + cacheUnstructured bool +} + +func (c *client) shouldBypassCache(obj runtime.Object) (bool, error) { + if c.cache == nil { + return true, nil + } + + gvk, err := c.GroupVersionKindFor(obj) + if err != nil { + return false, err + } + // TODO: this is producing unsafe guesses that don't actually work, + // but it matches ~99% of the cases out there. + if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := c.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !c.cacheUnstructured { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured, nil + } + return false, nil } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -168,6 +251,16 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi } } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return apiutil.GVKForObject(obj, c.scheme) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *client) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return apiutil.IsObjectNamespaced(obj, c.scheme, c.mapper) +} + // Scheme returns the scheme this client is using. func (c *client) Scheme() *runtime.Scheme { return c.scheme @@ -181,7 +274,7 @@ func (c *client) RESTMapper() meta.RESTMapper { // Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot create using only metadata") @@ -194,7 +287,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") @@ -206,7 +299,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e // Delete implements client.Client. 
func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Delete(ctx, obj, opts...) @@ -218,7 +311,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e // DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.DeleteAllOf(ctx, obj, opts...) @@ -231,7 +324,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) case *metav1.PartialObjectMetadata: return c.metadataClient.Patch(ctx, obj, patch, opts...) @@ -242,8 +335,14 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat // Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.Get(ctx, key, obj, opts...) + } + switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return c.unstructuredClient.Get(ctx, key, obj, opts...) case *metav1.PartialObjectMetadata: // Metadata only object should always preserve the GVK coming in from the caller. @@ -256,8 +355,14 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...Get // List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if isUncached, err := c.shouldBypassCache(obj); err != nil { + return err + } else if !isUncached { + return c.cache.List(ctx, obj, opts...) + } + switch x := obj.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return c.unstructuredClient.List(ctx, obj, opts...) case *metav1.PartialObjectMetadataList: // Metadata only object should always preserve the GVK. @@ -431,7 +536,7 @@ func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOp func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error { switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return errors.New("can not get subresource using only metadata") @@ -446,7 +551,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...) 
case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -459,7 +564,7 @@ func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...) case *metav1.PartialObjectMetadata: return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?") @@ -472,7 +577,7 @@ func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...Sub func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { - case *unstructured.Unstructured: + case runtime.Unstructured: return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) case *metav1.PartialObjectMetadata: return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go similarity index 82% rename from vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go index 857a0b38a..2d0787952 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go @@ -17,12 +17,12 @@ limitations under the License. package client import ( + "net/http" "strings" "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -30,8 +30,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types. -type clientCache struct { +// clientRestResources creates and stores rest clients and metadata for Kubernetes types. +type clientRestResources struct { + // httpClient is the http client to use for requests + httpClient *http.Client + // config is the rest.Config to talk to an apiserver config *rest.Config @@ -44,22 +47,22 @@ type clientCache struct { // codecs are used to create a REST client for a gvk codecs serializer.CodecFactory - // structuredResourceByType caches structured type metadata + // structuredResourceByType stores structured type metadata structuredResourceByType map[schema.GroupVersionKind]*resourceMeta - // unstructuredResourceByType caches unstructured type metadata + // unstructuredResourceByType stores unstructured type metadata unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta mu sync.RWMutex } // newResource maps obj to a Kubernetes Resource and constructs a client for that Resource. // If the object is a list, the resource represents the item's type instead. 
-func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { +func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) { if strings.HasSuffix(gvk.Kind, "List") && isList { // if this was a list, treat it as a request for the item's resource gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] } - client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs) + client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs, c.httpClient) if err != nil { return nil, err } @@ -72,15 +75,13 @@ func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstruc // getResource returns the resource meta information for the given type of object. // If the object is a list, the resource represents the item's type instead. -func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { +func (c *clientRestResources) getResource(obj runtime.Object) (*resourceMeta, error) { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return nil, err } - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList + _, isUnstructured := obj.(runtime.Unstructured) // It's better to do creation work twice than to not let multiple // people make requests at once @@ -108,7 +109,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { } // getObjMeta returns objMeta containing both type and object metadata and state. -func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { +func (c *clientRestResources) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { return nil, err @@ -120,7 +121,7 @@ func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { return &objMeta{resourceMeta: r, Object: m}, err } -// resourceMeta caches state for a Kubernetes type. +// resourceMeta stores state for a Kubernetes type. type resourceMeta struct { // client is the rest client used to talk to the apiserver rest.Interface diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go index e4e8585cb..5f0a6d4b1 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go @@ -98,12 +98,12 @@ func GetConfigWithContext(context string) (*rest.Config, error) { if err != nil { return nil, err } - if cfg.QPS == 0.0 { cfg.QPS = 20.0 - cfg.Burst = 30.0 } - + if cfg.Burst == 0 { + cfg.Burst = 30 + } return cfg, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go index e0e288509..b2e202494 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go @@ -26,8 +26,7 @@ limitations under the License. // to the API server. // // It is a common pattern in Kubernetes to read from a cache and write to the API -// server. This pattern is covered by the DelegatingClient type, which can -// be used to have a client whose Reader is different from the Writer. +// server. This pattern is covered by the creating the Client with a Cache. 
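// For example, a client that reads from the manager's cache but always talks to
// the API server for Pods can be built with Options.Cache, which takes over the
// role of the removed NewDelegatingClient helper (a sketch; mgr is assumed to be
// an already-constructed Manager and cfg its rest.Config):
//
//	c, err := client.New(cfg, client.Options{
//		Scheme: mgr.GetScheme(),
//		Mapper: mgr.GetRESTMapper(),
//		Cache: &client.CacheOptions{
//			Reader:     mgr.GetCache(),
//			DisableFor: []client.Object{&corev1.Pod{}},
//		},
//	})
//	if err != nil {
//		// handle error
//	}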
// // # Options // diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 73b56429e..bbcdd3832 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewDryRunClient wraps an existing client and enforces DryRun mode @@ -46,6 +47,16 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (c *dryRunClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return c.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (c *dryRunClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return c.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index b642f7f88..0ddda3163 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -20,6 +20,7 @@ import ( "context" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -169,6 +170,10 @@ type Client interface { Scheme() *runtime.Scheme // RESTMapper returns the rest this client is using. RESTMapper() meta.RESTMapper + // GroupVersionKindFor returns the GroupVersionKind for the given object. + GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) + // IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. + IsObjectNamespaced(obj runtime.Object) (bool, error) } // WithWatch supports Watch on top of the CRUD operations supported by diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index 00bc2175c..222dc7957 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -22,7 +22,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" + "k8s.io/apimachinery/pkg/runtime/schema" ) // NewNamespacedClient wraps an existing client enforcing the namespace value. @@ -52,9 +52,19 @@ func (n *namespacedClient) RESTMapper() meta.RESTMapper { return n.client.RESTMapper() } +// GroupVersionKindFor returns the GroupVersionKind for the given object. +func (n *namespacedClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + return n.client.GroupVersionKindFor(obj) +} + +// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced. +func (n *namespacedClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + return n.client.IsObjectNamespaced(obj) +} + // Create implements client.Client. 
func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -72,7 +82,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat // Update implements client.Client. func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -90,7 +100,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat // Delete implements client.Client. func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -108,7 +118,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet // DeleteAllOf implements client.Client. func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -121,7 +131,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... // Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -139,7 +149,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o // Get implements client.Client. 
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + isNamespaceScoped, err := n.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -180,7 +190,7 @@ type namespacedClientSubResourceClient struct { } func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -198,7 +208,7 @@ func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subR } func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -217,7 +227,7 @@ func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, s // Update implements client.SubResourceWriter. func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } @@ -235,8 +245,7 @@ func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Ob // Patch implements client.SubResourceWriter. func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { - isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) - + isNamespaceScoped, err := nsw.namespacedclient.IsObjectNamespaced(obj) if err != nil { return fmt.Errorf("error finding the scope of the object: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index 7f6f5b83f..50a461f1c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -606,6 +606,11 @@ func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { n.ApplyToList(&opts.ListOptions) } +// AsSelector returns a selector that matches objects in the given namespace. +func (n InNamespace) AsSelector() fields.Selector { + return fields.SelectorFromSet(fields.Set{"metadata.namespace": string(n)}) +} + // Limit specifies the maximum number of results to return from the server. // Limit does not implement DeleteAllOfOption interface because the server // does not support setting it for deletecollection operations. 
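// A small sketch of the new InNamespace.AsSelector helper added in this hunk:
// the same value can still be passed to List as a ListOption, and can now also
// be reused wherever a fields.Selector is expected (the namespace name here is
// an assumption).
package main

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	ns := client.InNamespace("ceph-csi")

	// As a field selector, e.g. for configuring informers or cache filters.
	sel := ns.AsSelector()
	fmt.Println(sel.String()) // metadata.namespace=ceph-csi
}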
@@ -788,6 +793,11 @@ func (forceOwnership) ApplyToPatch(opts *PatchOptions) { opts.Force = &definitelyTrue } +func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + // }}} // {{{ DeleteAllOf Options diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go deleted file mode 100644 index 19d1ab4db..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package client - -import ( - "context" - "strings" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client. -type NewDelegatingClientInput struct { - CacheReader Reader - Client Client - UncachedObjects []Object - CacheUnstructured bool -} - -// NewDelegatingClient creates a new delegating client. -// -// A delegating client forms a Client by composing separate reader, writer and -// statusclient interfaces. This way, you can have an Client that reads from a -// cache and writes to the API server. -func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) { - uncachedGVKs := map[schema.GroupVersionKind]struct{}{} - for _, obj := range in.UncachedObjects { - gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme()) - if err != nil { - return nil, err - } - uncachedGVKs[gvk] = struct{}{} - } - - return &delegatingClient{ - scheme: in.Client.Scheme(), - mapper: in.Client.RESTMapper(), - Reader: &delegatingReader{ - CacheReader: in.CacheReader, - ClientReader: in.Client, - scheme: in.Client.Scheme(), - uncachedGVKs: uncachedGVKs, - cacheUnstructured: in.CacheUnstructured, - }, - Writer: in.Client, - StatusClient: in.Client, - SubResourceClientConstructor: in.Client, - }, nil -} - -type delegatingClient struct { - Reader - Writer - StatusClient - SubResourceClientConstructor - - scheme *runtime.Scheme - mapper meta.RESTMapper -} - -// Scheme returns the scheme this client is using. -func (d *delegatingClient) Scheme() *runtime.Scheme { - return d.scheme -} - -// RESTMapper returns the rest mapper this client is using. -func (d *delegatingClient) RESTMapper() meta.RESTMapper { - return d.mapper -} - -// delegatingReader forms a Reader that will cause Get and List requests for -// unstructured types to use the ClientReader while requests for any other type -// of object with use the CacheReader. This avoids accidentally caching the -// entire cluster in the common case of loading arbitrary unstructured objects -// (e.g. from OwnerReferences). 
-type delegatingReader struct { - CacheReader Reader - ClientReader Reader - - uncachedGVKs map[schema.GroupVersionKind]struct{} - scheme *runtime.Scheme - cacheUnstructured bool -} - -func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, d.scheme) - if err != nil { - return false, err - } - // TODO: this is producing unsafe guesses that don't actually work, - // but it matches ~99% of the cases out there. - if meta.IsListType(obj) { - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - } - if _, isUncached := d.uncachedGVKs[gvk]; isUncached { - return true, nil - } - if !d.cacheUnstructured { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - return isUnstructured || isUnstructuredList, nil - } - return false, nil -} - -// Get retrieves an obj for a given object key from the Kubernetes Cluster. -func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - if isUncached, err := d.shouldBypassCache(obj); err != nil { - return err - } else if isUncached { - return d.ClientReader.Get(ctx, key, obj, opts...) - } - return d.CacheReader.Get(ctx, key, obj, opts...) -} - -// List retrieves list of objects for a given namespace and list options. -func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { - if isUncached, err := d.shouldBypassCache(list); err != nil { - return err - } else if isUncached { - return d.ClientReader.List(ctx, list, opts...) - } - return d.CacheReader.List(ctx, list, opts...) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index ade251572..92afd9a9c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -25,16 +25,14 @@ import ( var _ Reader = &typedClient{} var _ Writer = &typedClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. type typedClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -53,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti // Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -73,7 +71,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti // Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -92,7 +90,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti // DeleteAllOf implements client.Client. 
func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -111,7 +109,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet // Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -136,7 +134,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . // Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -151,7 +149,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts . // List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - r, err := c.cache.getResource(obj) + r, err := c.resources.getResource(obj) if err != nil { return err } @@ -168,7 +166,7 @@ func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOpti } func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -191,7 +189,7 @@ func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Ob } func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -216,7 +214,7 @@ func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subReso // UpdateSubResource used by SubResourceWriter to write status. func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } @@ -251,7 +249,7 @@ func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subReso // PatchSubResource used by SubResourceWriter to write subresource. func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - o, err := c.cache.getObjMeta(obj) + o, err := c.resources.getObjMeta(obj) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index 7f25c7be9..0d9695178 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -21,30 +21,27 @@ import ( "fmt" "strings" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) var _ Reader = &unstructuredClient{} var _ Writer = &unstructuredClient{} -// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes -// new clients at the time they are used, and caches the client. 
type unstructuredClient struct { - cache *clientCache + resources *clientRestResources paramCodec runtime.ParameterCodec } // Create implements client.Client. func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -60,20 +57,20 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Update implements client.Client. func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -90,17 +87,17 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // Delete implements client.Client. func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -119,11 +116,11 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De // DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -142,11 +139,11 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts // Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -171,17 +168,17 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch // Get implements client.Client. 
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() getOpts := GetOptions{} getOpts.ApplyOptions(opts) - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -194,22 +191,22 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object Do(ctx). Into(obj) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } // List implements client.Client. func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - u, ok := obj.(*unstructured.UnstructuredList) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - r, err := uc.cache.getResource(obj) + r, err := uc.resources.getResource(obj) if err != nil { return err } @@ -226,19 +223,19 @@ func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ... } func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResource) + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -257,19 +254,19 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour } func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { - if _, ok := obj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) + if _, ok := obj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) } - if _, ok := subResourceObj.(*unstructured.Unstructured); !ok { - return fmt.Errorf("unstructured client did not understand object: %T", obj) + if _, ok := subResourceObj.(runtime.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj) } if subResourceObj.GetName() == "" { subResourceObj.SetName(obj.GetName()) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -289,11 +286,11 @@ func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subRes } func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { - if _, ok := 
obj.(*unstructured.Unstructured); !ok { + if _, ok := obj.(runtime.Unstructured); !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -324,14 +321,14 @@ func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, } func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { - u, ok := obj.(*unstructured.Unstructured) + u, ok := obj.(runtime.Unstructured) if !ok { return fmt.Errorf("unstructured client did not understand object: %T", obj) } - gvk := u.GroupVersionKind() + gvk := u.GetObjectKind().GroupVersionKind() - o, err := uc.cache.getObjMeta(obj) + o, err := uc.resources.getObjMeta(obj) if err != nil { return err } @@ -359,6 +356,6 @@ func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, Do(ctx). Into(body) - u.SetGroupVersionKind(gvk) + u.GetObjectKind().SetGroupVersionKind(gvk) return result } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go index 70490664b..181b22a67 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -21,9 +21,8 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) @@ -33,21 +32,16 @@ func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { if err != nil { return nil, err } - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - return nil, err - } - return &watchingClient{client: client, dynamic: dynamicClient}, nil + return &watchingClient{client: client}, nil } type watchingClient struct { *client - dynamic dynamic.Interface } func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { switch l := list.(type) { - case *unstructured.UnstructuredList: + case runtime.Unstructured: return w.unstructuredWatch(ctx, l, opts...) case *metav1.PartialObjectMetadataList: return w.metadataWatch(ctx, l, opts...) @@ -81,25 +75,23 @@ func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialO return resInt.Watch(ctx, *listOpts.AsListOptions()) } -func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { - gvk := obj.GroupVersionKind() - gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") - - r, err := w.client.unstructuredClient.cache.getResource(obj) +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj runtime.Unstructured, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.unstructuredClient.resources.getResource(obj) if err != nil { return nil, err } listOpts := w.listOpts(opts...) - if listOpts.Namespace != "" && r.isNamespaced() { - return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) - } - return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). 
+ VersionedParams(listOpts.AsListOptions(), w.client.unstructuredClient.paramCodec). + Watch(ctx) } func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { - r, err := w.client.typedClient.cache.getResource(obj) + r, err := w.client.typedClient.resources.getResource(obj) if err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 905296cd3..7d00c3c4b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -19,6 +19,7 @@ package cluster import ( "context" "errors" + "net/http" "time" "github.com/go-logr/logr" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" @@ -37,14 +39,15 @@ import ( // Cluster provides various methods to interact with a cluster. type Cluster interface { - // SetFields will set any dependencies on an object for which the object has implemented the inject - // interface - e.g. inject.Client. - // Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. - SetFields(interface{}) error + // GetHTTPClient returns an HTTP client that can be used to talk to the apiserver + GetHTTPClient() *http.Client // GetConfig returns an initialized Config GetConfig() *rest.Config + // GetCache returns a cache.Cache + GetCache() cache.Cache + // GetScheme returns an initialized Scheme GetScheme() *runtime.Scheme @@ -57,9 +60,6 @@ type Cluster interface { // GetFieldIndexer returns a client.FieldIndexer configured with the client GetFieldIndexer() client.FieldIndexer - // GetCache returns a cache.Cache - GetCache() cache.Cache - // GetEventRecorderFor returns a new EventRecorder for the provided name GetEventRecorderFor(name string) record.EventRecorder @@ -83,7 +83,7 @@ type Options struct { Scheme *runtime.Scheme // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) // Logger is the logger that should be used by this Cluster. // If none is set, it defaults to log.Log global logger. @@ -103,24 +103,54 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). For namespaced resources the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string + // HTTPClient is the http client that will be used to create the default + // Cache and Client. If not set the rest.HTTPClientFor function will be used + // to create the http client. + HTTPClient *http.Client + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + // NewCache is the function that will create the cache to be used // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! 
+ // Only use a custom NewCache if you know what you are doing. NewCache cache.NewCacheFunc + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - // NOTE: The default client will not cache Unstructured. - NewClient NewClientFunc + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -137,7 +167,7 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) } // Option can be used to manipulate Options. @@ -153,52 +183,105 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { for _, opt := range opts { opt(&options) } - options = setOptionsDefaults(options) + options, err := setOptionsDefaults(options, config) + if err != nil { + options.Logger.Error(err, "Failed to set defaults") + return nil, err + } // Create the mapper provider - mapper, err := options.MapperProvider(config) + mapper, err := options.MapperProvider(config, options.HTTPClient) if err != nil { options.Logger.Error(err, "Failed to get API Group-Resources") return nil, err } // Create the cache for the cached read client and registering informers - cache, err := options.NewCache(config, cache.Options{Scheme: options.Scheme, Mapper: mapper, Resync: options.SyncPeriod, Namespace: options.Namespace}) + cacheOpts := options.Cache + { + if cacheOpts.Scheme == nil { + cacheOpts.Scheme = options.Scheme + } + if cacheOpts.Mapper == nil { + cacheOpts.Mapper = mapper + } + if cacheOpts.HTTPClient == nil { + cacheOpts.HTTPClient = options.HTTPClient + } + if cacheOpts.SyncPeriod == nil { + cacheOpts.SyncPeriod = options.SyncPeriod + } + if len(cacheOpts.Namespaces) == 0 && options.Namespace != "" { + cacheOpts.Namespaces = []string{options.Namespace} + } + } + cache, err := options.NewCache(config, cacheOpts) if err != nil { return nil, err } - clientOptions := client.Options{Scheme: options.Scheme, Mapper: mapper} + // Create the client, and default its options. 
+ clientOpts := options.Client + { + if clientOpts.Scheme == nil { + clientOpts.Scheme = options.Scheme + } + if clientOpts.Mapper == nil { + clientOpts.Mapper = mapper + } + if clientOpts.HTTPClient == nil { + clientOpts.HTTPClient = options.HTTPClient + } + if clientOpts.Cache == nil { + clientOpts.Cache = &client.CacheOptions{ + Unstructured: false, + } + } + if clientOpts.Cache.Reader == nil { + clientOpts.Cache.Reader = cache + } - apiReader, err := client.New(config, clientOptions) + // For backward compatibility, the ClientDisableCacheFor option should + // be appended to the DisableFor option in the client. + clientOpts.Cache.DisableFor = append(clientOpts.Cache.DisableFor, options.ClientDisableCacheFor...) + + if clientOpts.DryRun == nil && options.DryRunClient { + // For backward compatibility, the DryRunClient (if set) option should override + // the DryRun option in the client (if unset). + clientOpts.DryRun = pointer.Bool(true) + } + } + clientWriter, err := options.NewClient(config, clientOpts) if err != nil { return nil, err } - writeObj, err := options.NewClient(cache, config, clientOptions, options.ClientDisableCacheFor...) + // Create the API Reader, a client with no cache. + clientReader, err := client.New(config, client.Options{ + HTTPClient: options.HTTPClient, + Scheme: options.Scheme, + Mapper: mapper, + }) if err != nil { return nil, err } - if options.DryRunClient { - writeObj = client.NewDryRunClient(writeObj) - } - // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. - recorderProvider, err := options.newRecorderProvider(config, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, options.HTTPClient, options.Scheme, options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } return &cluster{ config: config, + httpClient: options.HTTPClient, scheme: options.Scheme, cache: cache, fieldIndexes: cache, - client: writeObj, - apiReader: apiReader, + client: clientWriter, + apiReader: clientReader, recorderProvider: recorderProvider, mapper: mapper, logger: options.Logger, @@ -206,21 +289,27 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { } // setOptionsDefaults set default values for Options fields. -func setOptionsDefaults(options Options) Options { +func setOptionsDefaults(options Options, config *rest.Config) (Options, error) { + if options.HTTPClient == nil { + var err error + options.HTTPClient, err = rest.HTTPClientFor(config) + if err != nil { + return options, err + } + } + // Use the Kubernetes client-go scheme if none is specified if options.Scheme == nil { options.Scheme = scheme.Scheme } if options.MapperProvider == nil { - options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { - return apiutil.NewDynamicRESTMapper(c) - } + options.MapperProvider = apiutil.NewDynamicRESTMapper } // Allow users to define how to create a new client if options.NewClient == nil { - options.NewClient = DefaultNewClient + options.NewClient = client.New } // Allow newCache to be mocked @@ -250,39 +339,5 @@ func setOptionsDefaults(options Options) Options { options.Logger = logf.RuntimeLog.WithName("cluster") } - return options -} - -// NewClientFunc allows a user to define how to create a client. 
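Editorial aside, not part of the patch: two of the option changes above affect callers directly. The deprecated Namespace field moves to the cache options (Cache.Namespaces), and a custom MapperProvider must now accept the shared *http.Client. A hedged sketch using cluster.New as shown in this hunk; the function name and namespace value are illustrative, and the MapperProvider assignment only restates the default to show the new signature:

    package example

    import (
        "net/http"

        "k8s.io/apimachinery/pkg/api/meta"
        "k8s.io/client-go/rest"
        "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
        "sigs.k8s.io/controller-runtime/pkg/cluster"
    )

    func newScopedCluster(cfg *rest.Config, namespace string) (cluster.Cluster, error) {
        return cluster.New(cfg, func(o *cluster.Options) {
            // Before: o.Namespace = namespace (now deprecated).
            o.Cache.Namespaces = []string{namespace}

            // MapperProvider gained an *http.Client parameter in this version.
            o.MapperProvider = func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) {
                return apiutil.NewDynamicRESTMapper(c, httpClient)
            }
        })
    }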
-type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) - -// ClientOptions are the optional arguments for tuning the caching client. -type ClientOptions struct { - UncachedObjects []client.Object - CacheUnstructured bool -} - -// DefaultNewClient creates the default caching client, that will never cache Unstructured. -func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { - return ClientBuilderWithOptions(ClientOptions{})(cache, config, options, uncachedObjects...) -} - -// ClientBuilderWithOptions returns a Client constructor that will build a client -// honoring the options argument -func ClientBuilderWithOptions(options ClientOptions) NewClientFunc { - return func(cache cache.Cache, config *rest.Config, clientOpts client.Options, uncachedObjects ...client.Object) (client.Client, error) { - options.UncachedObjects = append(options.UncachedObjects, uncachedObjects...) - - c, err := client.New(config, clientOpts) - if err != nil { - return nil, err - } - - return client.NewDelegatingClient(client.NewDelegatingClientInput{ - CacheReader: cache, - Client: c, - UncachedObjects: options.UncachedObjects, - CacheUnstructured: options.CacheUnstructured, - }) - } + return options, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go index 125e1d144..274276423 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/internal.go @@ -18,6 +18,7 @@ package cluster import ( "context" + "net/http" "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/api/meta" @@ -28,22 +29,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type cluster struct { // config is the rest.config used to talk to the apiserver. Required. config *rest.Config - // scheme is the scheme injected into Controllers, EventHandlers, Sources and Predicates. Defaults - // to scheme.scheme. - scheme *runtime.Scheme - - cache cache.Cache - - // TODO(directxman12): Provide an escape hatch to get individual indexers - // client is the client injected into Controllers (and EventHandlers, Sources and Predicates). - client client.Client + httpClient *http.Client + scheme *runtime.Scheme + cache cache.Cache + client client.Client // apiReader is the reader that will make requests to the api server and not the cache. 
apiReader client.Reader @@ -64,32 +59,14 @@ type cluster struct { logger logr.Logger } -func (c *cluster) SetFields(i interface{}) error { - if _, err := inject.ConfigInto(c.config, i); err != nil { - return err - } - if _, err := inject.ClientInto(c.client, i); err != nil { - return err - } - if _, err := inject.APIReaderInto(c.apiReader, i); err != nil { - return err - } - if _, err := inject.SchemeInto(c.scheme, i); err != nil { - return err - } - if _, err := inject.CacheInto(c.cache, i); err != nil { - return err - } - if _, err := inject.MapperInto(c.mapper, i); err != nil { - return err - } - return nil -} - func (c *cluster) GetConfig() *rest.Config { return c.config } +func (c *cluster) GetHTTPClient() *http.Client { + return c.httpClient +} + func (c *cluster) GetClient() client.Client { return c.client } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go index 8e853d6a0..9c7b875a8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -29,6 +29,8 @@ import ( // ControllerManagerConfiguration defines the functions necessary to parse a config file // and to configure the Options struct for the ctrl.Manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration interface { runtime.Object @@ -38,6 +40,8 @@ type ControllerManagerConfiguration interface { // DeferredFileLoader is used to configure the decoder for loading controller // runtime component config types. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type DeferredFileLoader struct { ControllerManagerConfiguration path string @@ -52,6 +56,8 @@ type DeferredFileLoader struct { // Defaults: // * Path: "./config.yaml" // * Kind: GenericControllerManagerConfiguration +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func File() *DeferredFileLoader { scheme := runtime.NewScheme() utilruntime.Must(v1alpha1.AddToScheme(scheme)) @@ -83,12 +89,6 @@ func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *Deferre return d } -// InjectScheme will configure the scheme to be used for decoding the file. -func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { - d.scheme = scheme - return nil -} - // loadFile is used from the mutex.Once to load the file. func (d *DeferredFileLoader) loadFile() { if d.scheme == nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go new file mode 100644 index 000000000..b37dffaee --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import "time" + +// Controller contains configuration options for a controller. +type Controller struct { + // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation + // allowed for that controller. + // + // When a controller is registered within this manager using the builder utilities, + // users have to specify the type the controller reconciles in the For(...) call. + // If the object's kind passed matches one of the keys in this map, the concurrency + // for that controller is set to the number specified. + // + // The key is expected to be consistent in form with GroupKind.String(), + // e.g. ReplicaSet in apps group (regardless of version) would be `ReplicaSet.apps`. + GroupKindConcurrency map[string]int + + // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. + MaxConcurrentReconciles int + + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. + RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go index a169ec559..47a5a2f1d 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/doc.go @@ -14,12 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package config contains functionality for interacting with ComponentConfig -// files -// -// # DeferredFileLoader -// -// This uses a deferred file decoding allowing you to chain your configuration -// setup. You can pass this into manager.Options#File and it will load your -// config. +// Package config contains functionality for interacting with +// configuration for controller-runtime components. package config diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go index 1e3adbafb..8fdf14d39 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/doc.go @@ -17,4 +17,6 @@ limitations under the License. // Package v1alpha1 provides the ControllerManagerConfiguration used for // configuring ctrl.Manager // +kubebuilder:object:generate=true +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. 
package v1alpha1 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go index 9efdbc066..ca854bcf3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -23,12 +23,18 @@ import ( var ( // GroupVersion is group version used to register these objects. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. + // + // Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. AddToScheme = SchemeBuilder.AddToScheme ) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go index f2226278c..52c8ab300 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -25,6 +25,8 @@ import ( ) // ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfigurationSpec struct { // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -60,7 +62,7 @@ type ControllerManagerConfigurationSpec struct { // +optional Controller *ControllerConfigurationSpec `json:"controller,omitempty"` - // Metrics contains thw controller metrics configuration + // Metrics contains the controller metrics configuration // +optional Metrics ControllerMetrics `json:"metrics,omitempty"` @@ -75,6 +77,11 @@ type ControllerManagerConfigurationSpec struct { // ControllerConfigurationSpec defines the global configuration for // controllers registered with the manager. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. +// +// Deprecated: Controller global configuration can now be set at the manager level, +// using the manager.Options.Controller field. 
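Editorial aside, not part of the patch: the deprecation above points at the manager-level replacement for ControllerConfigurationSpec. A hedged sketch, assuming manager.Options.Controller carries the new config.Controller type introduced earlier in this patch (the manager hunk itself is not shown here):

    package example

    import (
        "time"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/config"
    )

    func newManagerWithControllerDefaults() (ctrl.Manager, error) {
        return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
            // Global defaults picked up by controllers created through this manager.
            Controller: config.Controller{
                MaxConcurrentReconciles: 4,
                CacheSyncTimeout:        2 * time.Minute,
            },
        })
    }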
type ControllerConfigurationSpec struct { // GroupKindConcurrency is a map from a Kind to the number of concurrent reconciliation // allowed for that controller. @@ -101,6 +108,8 @@ type ControllerConfigurationSpec struct { } // ControllerMetrics defines the metrics configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerMetrics struct { // BindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. @@ -110,6 +119,8 @@ type ControllerMetrics struct { } // ControllerHealth defines the health configs. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerHealth struct { // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -127,6 +138,8 @@ type ControllerHealth struct { } // ControllerWebhook defines the webhook server for the controller. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerWebhook struct { // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. @@ -149,6 +162,8 @@ type ControllerWebhook struct { // +kubebuilder:object:root=true // ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. type ControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -157,6 +172,8 @@ type ControllerManagerConfiguration struct { } // Complete returns the configuration for controller-runtime. +// +// Deprecated: The component config package has been deprecated and will be removed in a future release. Users should migrate to their own config implementation, please share feedback in https://github.com/kubernetes-sigs/controller-runtime/issues/895. func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { return *c, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index fe7f94fdc..6732b6f70 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -39,6 +39,18 @@ type Options struct { // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int + // CacheSyncTimeout refers to the time limit set to wait for syncing caches. + // Defaults to 2 minutes if not set. + CacheSyncTimeout time.Duration + + // RecoverPanic indicates whether the panic caused by reconcile should be recovered. + // Defaults to the Controller.RecoverPanic setting from the Manager if unset. 
+ RecoverPanic *bool + + // NeedLeaderElection indicates whether the controller needs to use leader election. + // Defaults to true, which means the controller will use leader election. + NeedLeaderElection *bool + // Reconciler reconciles an object Reconciler reconcile.Reconciler @@ -50,14 +62,6 @@ type Options struct { // LogConstructor is used to construct a logger used for this controller and passed // to each reconciliation via the context field. LogConstructor func(request *reconcile.Request) logr.Logger - - // CacheSyncTimeout refers to the time limit set to wait for syncing caches. - // Defaults to 2 minutes if not set. - CacheSyncTimeout time.Duration - - // RecoverPanic indicates whether the panic caused by reconcile should be recovered. - // Defaults to the Controller.RecoverPanic setting from the Manager if unset. - RecoverPanic *bool } // Controller implements a Kubernetes API. A Controller manages a work queue fed reconcile.Requests @@ -124,26 +128,33 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller } if options.MaxConcurrentReconciles <= 0 { - options.MaxConcurrentReconciles = 1 + if mgr.GetControllerOptions().MaxConcurrentReconciles > 0 { + options.MaxConcurrentReconciles = mgr.GetControllerOptions().MaxConcurrentReconciles + } else { + options.MaxConcurrentReconciles = 1 + } } if options.CacheSyncTimeout == 0 { - options.CacheSyncTimeout = 2 * time.Minute + if mgr.GetControllerOptions().CacheSyncTimeout != 0 { + options.CacheSyncTimeout = mgr.GetControllerOptions().CacheSyncTimeout + } else { + options.CacheSyncTimeout = 2 * time.Minute + } } if options.RateLimiter == nil { options.RateLimiter = workqueue.DefaultControllerRateLimiter() } - // Inject dependencies into Reconciler - if err := mgr.SetFields(options.Reconciler); err != nil { - return nil, err - } - if options.RecoverPanic == nil { options.RecoverPanic = mgr.GetControllerOptions().RecoverPanic } + if options.NeedLeaderElection == nil { + options.NeedLeaderElection = mgr.GetControllerOptions().NeedLeaderElection + } + // Create controller with dependencies set return &controller.Controller{ Do: options.Reconciler, @@ -152,10 +163,10 @@ func NewUnmanaged(name string, mgr manager.Manager, options Options) (Controller }, MaxConcurrentReconciles: options.MaxConcurrentReconciles, CacheSyncTimeout: options.CacheSyncTimeout, - SetFields: mgr.SetFields, Name: name, LogConstructor: options.LogConstructor, RecoverPanic: options.RecoverPanic, + LeaderElected: options.NeedLeaderElection, }, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index e6d3a4eaa..c72b2e1eb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" @@ -36,7 +38,7 @@ var _ EventHandler = &EnqueueRequestForObject{} type EnqueueRequestForObject struct{} // Create implements EventHandler. 
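Editorial aside, not part of the patch: the controller Options fields added above (CacheSyncTimeout, RecoverPanic, NeedLeaderElection) can be set per controller; values left unset fall back to the manager-level defaults, as the NewUnmanaged changes above show. A hedged sketch; "example" and the reconciler are illustrative:

    package example

    import (
        "k8s.io/utils/pointer"
        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    func newController(mgr ctrl.Manager, r reconcile.Reconciler) (controller.Controller, error) {
        return controller.New("example", mgr, controller.Options{
            Reconciler:              r,
            MaxConcurrentReconciles: 2,
            // Run even when this replica is not the leader; unset fields fall back
            // to the defaults resolved from mgr.GetControllerOptions().
            NeedLeaderElection: pointer.Bool(false),
        })
    }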
-func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) return @@ -48,7 +50,7 @@ func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.Rate } // Update implements EventHandler. -func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { switch { case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ @@ -66,7 +68,7 @@ func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.Rate } // Delete implements EventHandler. -func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) return @@ -78,7 +80,7 @@ func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.Rate } // Generic implements EventHandler. -func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *EnqueueRequestForObject) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) return diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 17401b1fd..b55fdde6b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -17,16 +17,17 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) // MapFunc is the signature required for enqueueing requests from a generic function. // This type is usually used with EnqueueRequestsFromMapFunc when registering an event handler. -type MapFunc func(client.Object) []reconcile.Request +type MapFunc func(context.Context, client.Object) []reconcile.Request // EnqueueRequestsFromMapFunc enqueues Requests by running a transformation function that outputs a collection // of reconcile.Requests on each Event. The reconcile.Requests may be for an arbitrary set of objects @@ -52,32 +53,32 @@ type enqueueRequestsFromMapFunc struct { } // Create implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Update implements EventHandler. 
-func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.ObjectOld, reqs) - e.mapAndEnqueue(q, evt.ObjectNew, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectOld, reqs) + e.mapAndEnqueue(ctx, q, evt.ObjectNew, reqs) } // Delete implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } // Generic implements EventHandler. -func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestsFromMapFunc) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} - e.mapAndEnqueue(q, evt.Object, reqs) + e.mapAndEnqueue(ctx, q, evt.Object, reqs) } -func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { - for _, req := range e.toRequests(object) { +func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(ctx context.Context, q workqueue.RateLimitingInterface, object client.Object, reqs map[reconcile.Request]empty) { + for _, req := range e.toRequests(ctx, object) { _, ok := reqs[req] if !ok { q.Add(req) @@ -85,13 +86,3 @@ func (e *enqueueRequestsFromMapFunc) mapAndEnqueue(q workqueue.RateLimitingInter } } } - -// EnqueueRequestsFromMapFunc can inject fields into the mapper. - -// InjectFunc implements inject.Injector. -func (e *enqueueRequestsFromMapFunc) InjectFunc(f inject.Func) error { - if f == nil { - return nil - } - return f(e.toRequests) -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 63699893f..02e7d756f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -17,6 +17,7 @@ limitations under the License. package handler import ( + "context" "fmt" "k8s.io/apimachinery/pkg/api/meta" @@ -25,15 +26,18 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -var _ EventHandler = &EnqueueRequestForOwner{} +var _ EventHandler = &enqueueRequestForOwner{} -var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOwner") +var log = logf.RuntimeLog.WithName("eventhandler").WithName("enqueueRequestForOwner") + +// OwnerOption modifies an EnqueueRequestForOwner EventHandler. +type OwnerOption func(e *enqueueRequestForOwner) // EnqueueRequestForOwner enqueues Requests for the Owners of an object. E.g. the object that created // the object that was the source of the Event. @@ -42,13 +46,34 @@ var log = logf.RuntimeLog.WithName("eventhandler").WithName("EnqueueRequestForOw // // - a source.Kind Source with Type of Pod. 
// -// - a handler.EnqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and IsController set to true. -type EnqueueRequestForOwner struct { - // OwnerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. - OwnerType runtime.Object +// - a handler.enqueueRequestForOwner EventHandler with an OwnerType of ReplicaSet and OnlyControllerOwner set to true. +func EnqueueRequestForOwner(scheme *runtime.Scheme, mapper meta.RESTMapper, ownerType client.Object, opts ...OwnerOption) EventHandler { + e := &enqueueRequestForOwner{ + ownerType: ownerType, + mapper: mapper, + } + if err := e.parseOwnerTypeGroupKind(scheme); err != nil { + panic(err) + } + for _, opt := range opts { + opt(e) + } + return e +} - // IsController if set will only look at the first OwnerReference with Controller: true. - IsController bool +// OnlyControllerOwner if provided will only look at the first OwnerReference with Controller: true. +func OnlyControllerOwner() OwnerOption { + return func(e *enqueueRequestForOwner) { + e.isController = true + } +} + +type enqueueRequestForOwner struct { + // ownerType is the type of the Owner object to look for in OwnerReferences. Only Group and Kind are compared. + ownerType runtime.Object + + // isController if set will only look at the first OwnerReference with Controller: true. + isController bool // groupKind is the cached Group and Kind from OwnerType groupKind schema.GroupKind @@ -58,7 +83,7 @@ type EnqueueRequestForOwner struct { } // Create implements EventHandler. -func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Create(ctx context.Context, evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -67,7 +92,7 @@ func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateL } // Update implements EventHandler. -func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Update(ctx context.Context, evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) e.getOwnerReconcileRequest(evt.ObjectNew, reqs) @@ -77,7 +102,7 @@ func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateL } // Delete implements EventHandler. -func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Delete(ctx context.Context, evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -86,7 +111,7 @@ func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateL } // Generic implements EventHandler. -func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { +func (e *enqueueRequestForOwner) Generic(ctx context.Context, evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) for req := range reqs { @@ -96,17 +121,17 @@ func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.Rat // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. 
Returns false // if the OwnerType could not be parsed using the scheme. -func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { +func (e *enqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error { // Get the kinds of the type - kinds, _, err := scheme.ObjectKinds(e.OwnerType) + kinds, _, err := scheme.ObjectKinds(e.ownerType) if err != nil { - log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType)) + log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType)) return err } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. if len(kinds) != 1 { - err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.ownerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.ownerType), "kinds", kinds) return err } // Cache the Group and Kind for the OwnerType @@ -116,7 +141,7 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) // getOwnerReconcileRequest looks at object and builds a map of reconcile.Request to reconcile // owners of object that match e.OwnerType. -func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { +func (e *enqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, result map[reconcile.Request]empty) { // Iterate through the OwnerReferences looking for a match on Group and Kind against what was requested // by the user for _, ref := range e.getOwnersReferences(object) { @@ -138,7 +163,7 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, Name: ref.Name, }} - // if owner is not namespaced then we should set the namespace to the empty + // if owner is not namespaced then we should not set the namespace mapping, err := e.mapper.RESTMapping(e.groupKind, refGV.Version) if err != nil { log.Error(err, "Could not retrieve rest mapping", "kind", e.groupKind) @@ -153,16 +178,16 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, } } -// getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner +// getOwnersReferences returns the OwnerReferences for an object as specified by the enqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) // - if IsController is false: take all OwnerReferences. 
-func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { +func (e *enqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { if object == nil { return nil } // If not filtered as Controller only, then use all the OwnerReferences - if !e.IsController { + if !e.isController { return object.GetOwnerReferences() } // If filtered to a Controller, only take the Controller OwnerReference @@ -172,18 +197,3 @@ func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []met // No Controller OwnerReference found return nil } - -var _ inject.Scheme = &EnqueueRequestForOwner{} - -// InjectScheme is called by the Controller to provide a singleton scheme to the EnqueueRequestForOwner. -func (e *EnqueueRequestForOwner) InjectScheme(s *runtime.Scheme) error { - return e.parseOwnerTypeGroupKind(s) -} - -var _ inject.Mapper = &EnqueueRequestForOwner{} - -// InjectMapper is called by the Controller to provide the rest mapper used by the manager. -func (e *EnqueueRequestForOwner) InjectMapper(m meta.RESTMapper) error { - e.mapper = m - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index 8652d22d7..2f380f4fc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -17,6 +17,8 @@ limitations under the License. package handler import ( + "context" + "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" ) @@ -41,17 +43,17 @@ import ( // Most users shouldn't need to implement their own EventHandler. type EventHandler interface { // Create is called in response to an create event - e.g. Pod Creation. - Create(event.CreateEvent, workqueue.RateLimitingInterface) + Create(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event - e.g. Pod Updated. - Update(event.UpdateEvent, workqueue.RateLimitingInterface) + Update(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event - e.g. Pod Deleted. - Delete(event.DeleteEvent, workqueue.RateLimitingInterface) + Delete(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // Generic is called in response to an event of an unknown type or a synthetic event triggered as a cron or // external trigger request - e.g. reconcile Autoscaling, or a Webhook. - Generic(event.GenericEvent, workqueue.RateLimitingInterface) + Generic(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } var _ EventHandler = Funcs{} @@ -60,45 +62,45 @@ var _ EventHandler = Funcs{} type Funcs struct { // Create is called in response to an add event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - CreateFunc func(event.CreateEvent, workqueue.RateLimitingInterface) + CreateFunc func(context.Context, event.CreateEvent, workqueue.RateLimitingInterface) // Update is called in response to an update event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - UpdateFunc func(event.UpdateEvent, workqueue.RateLimitingInterface) + UpdateFunc func(context.Context, event.UpdateEvent, workqueue.RateLimitingInterface) // Delete is called in response to a delete event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. 
- DeleteFunc func(event.DeleteEvent, workqueue.RateLimitingInterface) + DeleteFunc func(context.Context, event.DeleteEvent, workqueue.RateLimitingInterface) // GenericFunc is called in response to a generic event. Defaults to no-op. // RateLimitingInterface is used to enqueue reconcile.Requests. - GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) + GenericFunc func(context.Context, event.GenericEvent, workqueue.RateLimitingInterface) } // Create implements EventHandler. -func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Create(ctx context.Context, e event.CreateEvent, q workqueue.RateLimitingInterface) { if h.CreateFunc != nil { - h.CreateFunc(e, q) + h.CreateFunc(ctx, e, q) } } // Delete implements EventHandler. -func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { if h.DeleteFunc != nil { - h.DeleteFunc(e, q) + h.DeleteFunc(ctx, e, q) } } // Update implements EventHandler. -func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Update(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { if h.UpdateFunc != nil { - h.UpdateFunc(e, q) + h.UpdateFunc(ctx, e, q) } } // Generic implements EventHandler. -func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { +func (h Funcs) Generic(ctx context.Context, e event.GenericEvent, q workqueue.RateLimitingInterface) { if h.GenericFunc != nil { - h.GenericFunc(e, q) + h.GenericFunc(ctx, e, q) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f7734695c..83aba28cb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -33,12 +33,9 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/source" ) -var _ inject.Injector = &Controller{} - // Controller implements controller.Controller. type Controller struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. @@ -61,10 +58,6 @@ type Controller struct { // the Queue for processing Queue workqueue.RateLimitingInterface - // SetFields is used to inject dependencies into other objects such as Sources, EventHandlers and Predicates - // Deprecated: the caller should handle injected fields itself. - SetFields func(i interface{}) error - // mu is used to synchronize Controller setup mu sync.Mutex @@ -93,6 +86,9 @@ type Controller struct { // RecoverPanic indicates whether the panic caused by reconcile should be recovered. RecoverPanic *bool + + // LeaderElected indicates whether the controller is leader elected or always running. + LeaderElected *bool } // watchDescription contains all the information necessary to start a watch. 
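The enqueue_owner.go and eventhandler.go hunks above replace the injection-based EnqueueRequestForOwner struct with a constructor function and thread a context.Context through every EventHandler method. A minimal sketch of how a caller of this vendored API might set up such a watch under the new signatures; the manager `mgr`, controller `c`, and the Pod/ReplicaSet types are illustrative assumptions, not part of this patch:

// Sketch only: assumes the controller-runtime v0.15-style APIs vendored above.
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchPodsOwnedByReplicaSets enqueues the owning ReplicaSet whenever a Pod changes.
// EnqueueRequestForOwner is now a constructor that receives the scheme and RESTMapper
// directly instead of having them injected by the controller.
func watchPodsOwnedByReplicaSets(mgr manager.Manager, c controller.Controller) error {
	return c.Watch(
		source.Kind(mgr.GetCache(), &corev1.Pod{}),
		handler.EnqueueRequestForOwner(
			mgr.GetScheme(),
			mgr.GetRESTMapper(),
			&appsv1.ReplicaSet{},
			handler.OnlyControllerOwner(),
		),
	)
}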
@@ -127,19 +123,6 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc c.mu.Lock() defer c.mu.Unlock() - // Inject Cache into arguments - if err := c.SetFields(src); err != nil { - return err - } - if err := c.SetFields(evthdler); err != nil { - return err - } - for _, pr := range prct { - if err := c.SetFields(pr); err != nil { - return err - } - } - // Controller hasn't started yet, store the watches locally and return. // // These watches are going to be held on the controller struct until the manager or user calls Start(...). @@ -152,6 +135,14 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc return src.Start(c.ctx, evthdler, c.Queue, prct...) } +// NeedLeaderElection implements the manager.LeaderElectionRunnable interface. +func (c *Controller) NeedLeaderElection() bool { + if c.LeaderElected == nil { + return true + } + return *c.LeaderElected +} + // Start implements controller.Controller. func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling @@ -323,7 +314,11 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { result, err := c.Reconcile(ctx, req) switch { case err != nil: - c.Queue.AddRateLimited(req) + if errors.Is(err, reconcile.TerminalError(nil)) { + ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Inc() + } else { + c.Queue.AddRateLimited(req) + } ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() log.Error(err, "Reconciler error") @@ -351,12 +346,6 @@ func (c *Controller) GetLogger() logr.Logger { return c.LogConstructor(nil) } -// InjectFunc implement SetFields.Injector. -func (c *Controller) InjectFunc(f inject.Func) error { - c.SetFields = f - return nil -} - // updateMetrics updates prometheus metrics within the controller. func (c *Controller) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index baec66927..b74ce062b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -39,6 +39,13 @@ var ( Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) + // TerminalReconcileErrors is a prometheus counter metrics which holds the total + // number of terminal errors from the Reconciler. + TerminalReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "controller_runtime_terminal_reconcile_errors_total", + Help: "Total number of terminal reconciliation errors per controller", + }, []string{"controller"}) + // ReconcileTime is a prometheus metric which keeps track of the duration // of reconciliations. 
ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ @@ -67,6 +74,7 @@ func init() { metrics.Registry.MustRegister( ReconcileTotal, ReconcileErrors, + TerminalReconcileErrors, ReconcileTime, WorkerCount, ActiveWorkers, diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go deleted file mode 100644 index 7057f3dbe..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package objectutil - -import ( - "errors" - "fmt" - - apimeta "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// FilterWithLabels returns a copy of the items in objs matching labelSel. -func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { - outItems := make([]runtime.Object, 0, len(objs)) - for _, obj := range objs { - meta, err := apimeta.Accessor(obj) - if err != nil { - return nil, err - } - if labelSel != nil { - lbls := labels.Set(meta.GetLabels()) - if !labelSel.Matches(lbls) { - continue - } - } - outItems = append(outItems, obj.DeepCopyObject()) - } - return outItems, nil -} - -// IsAPINamespaced returns true if the object is namespace scoped. -// For unstructured objects the gvk is found from the object itself. -func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return false, err - } - - return IsAPINamespacedWithGVK(gvk, scheme, restmapper) -} - -// IsAPINamespacedWithGVK returns true if the object having the provided -// GVK is namespace scoped. 
-func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { - restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) - if err != nil { - return false, fmt.Errorf("failed to get restmapping: %w", err) - } - - scope := restmapping.Scope.Name() - - if scope == "" { - return false, errors.New("scope cannot be identified, empty scope returned") - } - - if scope != apimeta.RESTScopeNameRoot { - return true, nil - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go index 9d8b2f074..21f0146ba 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go @@ -19,6 +19,7 @@ package recorder import ( "context" "fmt" + "net/http" "sync" "github.com/go-logr/logr" @@ -110,8 +111,12 @@ func (p *Provider) getBroadcaster() record.EventBroadcaster { } // NewProvider create a new Provider instance. -func NewProvider(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { - corev1Client, err := corev1client.NewForConfig(config) +func NewProvider(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster EventBroadcasterProducer) (*Provider, error) { + if httpClient == nil { + panic("httpClient must not be nil") + } + + corev1Client, err := corev1client.NewForConfigAndClient(config, httpClient) if err != nil { return nil, fmt.Errorf("failed to init client: %w", err) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go similarity index 67% rename from vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go rename to vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go index f0cfe212e..ae8404a1f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/event_handler.go @@ -17,6 +17,7 @@ limitations under the License. package internal import ( + "context" "fmt" "k8s.io/client-go/tools/cache" @@ -31,17 +32,39 @@ import ( var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") -var _ cache.ResourceEventHandler = EventHandler{} +// NewEventHandler creates a new EventHandler. +func NewEventHandler(ctx context.Context, queue workqueue.RateLimitingInterface, handler handler.EventHandler, predicates []predicate.Predicate) *EventHandler { + return &EventHandler{ + ctx: ctx, + handler: handler, + queue: queue, + predicates: predicates, + } +} // EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. type EventHandler struct { - EventHandler handler.EventHandler - Queue workqueue.RateLimitingInterface - Predicates []predicate.Predicate + // ctx stores the context that created the event handler + // that is used to propagate cancellation signals to each handler function. 
+ ctx context.Context + + handler handler.EventHandler + queue workqueue.RateLimitingInterface + predicates []predicate.Predicate +} + +// HandlerFuncs converts EventHandler to a ResourceEventHandlerFuncs +// TODO: switch to ResourceEventHandlerDetailedFuncs with client-go 1.27 +func (e *EventHandler) HandlerFuncs() cache.ResourceEventHandlerFuncs { + return cache.ResourceEventHandlerFuncs{ + AddFunc: e.OnAdd, + UpdateFunc: e.OnUpdate, + DeleteFunc: e.OnDelete, + } } // OnAdd creates CreateEvent and calls Create on EventHandler. -func (e EventHandler) OnAdd(obj interface{}) { +func (e *EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} // Pull Object out of the object @@ -53,18 +76,20 @@ func (e EventHandler) OnAdd(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Create(c) { return } } // Invoke create handler - e.EventHandler.Create(c, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Create(ctx, c, e.queue) } // OnUpdate creates UpdateEvent and calls Update on EventHandler. -func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { +func (e *EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} if o, ok := oldObj.(client.Object); ok { @@ -84,18 +109,20 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Update(u) { return } } // Invoke update handler - e.EventHandler.Update(u, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Update(ctx, u, e.queue) } // OnDelete creates DeleteEvent and calls Delete on EventHandler. -func (e EventHandler) OnDelete(obj interface{}) { +func (e *EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} // Deal with tombstone events by pulling the object out. Tombstone events wrap the object in a @@ -114,6 +141,9 @@ func (e EventHandler) OnDelete(obj interface{}) { return } + // Set DeleteStateUnknown to true + d.DeleteStateUnknown = true + // Set obj to the tombstone obj obj = tombstone.Obj } @@ -127,12 +157,14 @@ func (e EventHandler) OnDelete(obj interface{}) { return } - for _, p := range e.Predicates { + for _, p := range e.predicates { if !p.Delete(d) { return } } // Invoke delete handler - e.EventHandler.Delete(d, e.Queue) + ctx, cancel := context.WithCancel(e.ctx) + defer cancel() + e.handler.Delete(ctx, d, e.queue) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go new file mode 100644 index 000000000..b3a822712 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/source/kind.go @@ -0,0 +1,117 @@ +package internal + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). +type Kind struct { + // Type is the type of object to watch. e.g. &v1.Pod{} + Type client.Object + + // Cache used to watch APIs + Cache cache.Cache + + // started may contain an error if one was encountered during startup. 
If its closed and does not + // contain an error, startup and syncing finished. + started chan error + startCancel func() +} + +// Start is internal and should be called only by the Controller to register an EventHandler with the Informer +// to enqueue reconcile.Requests. +func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, + prct ...predicate.Predicate) error { + if ks.Type == nil { + return fmt.Errorf("must create Kind with a non-nil object") + } + if ks.Cache == nil { + return fmt.Errorf("must create Kind with a non-nil cache") + } + + // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not + // sync that informer (most commonly due to RBAC issues). + ctx, ks.startCancel = context.WithCancel(ctx) + ks.started = make(chan error) + go func() { + var ( + i cache.Informer + lastErr error + ) + + // Tries to get an informer until it returns true, + // an error or the specified context is cancelled or expired. + if err := wait.PollUntilContextCancel(ctx, 10*time.Second, true, func(ctx context.Context) (bool, error) { + // Lookup the Informer from the Cache and add an EventHandler which populates the Queue + i, lastErr = ks.Cache.GetInformer(ctx, ks.Type) + if lastErr != nil { + kindMatchErr := &meta.NoKindMatchError{} + switch { + case errors.As(lastErr, &kindMatchErr): + log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", + "kind", kindMatchErr.GroupKind) + case runtime.IsNotRegisteredError(lastErr): + log.Error(lastErr, "kind must be registered to the Scheme") + default: + log.Error(lastErr, "failed to get informer from cache") + } + return false, nil // Retry. + } + return true, nil + }); err != nil { + if lastErr != nil { + ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) + return + } + ks.started <- err + return + } + + _, err := i.AddEventHandler(NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) + if err != nil { + ks.started <- err + return + } + if !ks.Cache.WaitForCacheSync(ctx) { + // Would be great to return something more informative here + ks.started <- errors.New("cache did not sync") + } + close(ks.started) + }() + + return nil +} + +func (ks *Kind) String() string { + if ks.Type != nil { + return fmt.Sprintf("kind source: %T", ks.Type) + } + return "kind source: unknown type" +} + +// WaitForSync implements SyncingSource to allow controllers to wait with starting +// workers until the cache is synced. +func (ks *Kind) WaitForSync(ctx context.Context) error { + select { + case err := <-ks.started: + return err + case <-ctx.Done(): + ks.startCancel() + if errors.Is(ctx.Err(), context.Canceled) { + return nil + } + return fmt.Errorf("timed out waiting for cache to be synced for Kind %T", ks.Type) + } +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index c82447d91..c27b4305f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -25,7 +25,7 @@ import ( // loggerPromise knows how to populate a concrete logr.Logger // with options, given an actual base logger later on down the line. 
type loggerPromise struct { - logger *DelegatingLogSink + logger *delegatingLogSink childPromises []*loggerPromise promisesLock sync.Mutex @@ -33,7 +33,7 @@ type loggerPromise struct { tags []interface{} } -func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { +func (p *loggerPromise) WithName(l *delegatingLogSink, name string) *loggerPromise { res := &loggerPromise{ logger: l, name: &name, @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromi } // WithValues provides a new Logger with the tags appended. -func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { +func (p *loggerPromise) WithValues(l *delegatingLogSink, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, tags: tags, @@ -84,12 +84,12 @@ func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { } } -// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// delegatingLogSink is a logsink that delegates to another logr.LogSink. // If the underlying promise is not nil, it registers calls to sub-loggers with // the logging factory to be populated later, and returns a new delegating // logger. It expects to have *some* logr.Logger set at all times (generally // a no-op logger before the promises are fulfilled). -type DelegatingLogSink struct { +type delegatingLogSink struct { lock sync.RWMutex logger logr.LogSink promise *loggerPromise @@ -97,7 +97,8 @@ type DelegatingLogSink struct { } // Init implements logr.LogSink. -func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { +func (l *delegatingLogSink) Init(info logr.RuntimeInfo) { + eventuallyFulfillRoot() l.lock.Lock() defer l.lock.Unlock() l.info = info @@ -106,7 +107,8 @@ func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info // logs. -func (l *DelegatingLogSink) Enabled(level int) bool { +func (l *delegatingLogSink) Enabled(level int) bool { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() return l.logger.Enabled(level) @@ -118,7 +120,8 @@ func (l *DelegatingLogSink) Enabled(level int) bool { // the log line. The key/value pairs can then be used to add additional // variable information. The key/value pairs should alternate string // keys and arbitrary values. -func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Info(level, msg, keysAndValues...) @@ -132,14 +135,16 @@ func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interfa // The msg field should be used to add context to any underlying error, // while the err field should be used to attach the actual error that // triggered this log line, if present. -func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *delegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() l.logger.Error(err, msg, keysAndValues...) } // WithName provides a new Logger with the name appended. 
-func (l *DelegatingLogSink) WithName(name string) logr.LogSink { +func (l *delegatingLogSink) WithName(name string) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -151,7 +156,7 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithName(res, name) res.promise = promise @@ -159,7 +164,8 @@ func (l *DelegatingLogSink) WithName(name string) logr.LogSink { } // WithValues provides a new Logger with the tags appended. -func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { +func (l *delegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + eventuallyFulfillRoot() l.lock.RLock() defer l.lock.RUnlock() @@ -171,7 +177,7 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { return sink } - res := &DelegatingLogSink{logger: l.logger} + res := &delegatingLogSink{logger: l.logger} promise := l.promise.WithValues(res, tags...) res.promise = promise @@ -181,16 +187,16 @@ func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { // Fulfill switches the logger over to use the actual logger // provided, instead of the temporary initial one, if this method // has not been previously called. -func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { +func (l *delegatingLogSink) Fulfill(actual logr.LogSink) { if l.promise != nil { l.promise.Fulfill(actual) } } -// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// newDelegatingLogSink constructs a new DelegatingLogSink which uses // the given logger before its promise is fulfilled. -func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { - l := &DelegatingLogSink{ +func newDelegatingLogSink(initial logr.LogSink) *delegatingLogSink { + l := &delegatingLogSink{ logger: initial, promise: &loggerPromise{promisesLock: sync.Mutex{}}, } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go index 082dce3ad..a79151c69 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -35,7 +35,10 @@ package log import ( "context" - "sync" + "fmt" + "os" + "runtime/debug" + "sync/atomic" "time" "github.com/go-logr/logr" @@ -43,35 +46,24 @@ import ( // SetLogger sets a concrete logging implementation for all deferred Loggers. func SetLogger(l logr.Logger) { - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - - loggerWasSet = true - dlog.Fulfill(l.GetSink()) + logFullfilled.Store(true) + rootLog.Fulfill(l.GetSink()) } -// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries -// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory -// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. -// -// We need to keep the DelegatingLogSink because we have various inits() that get a logger from -// here. They will always get executed before any code that imports controller-runtime -// has a chance to run and hence to set an actual logger. 
-func init() { - // Init is blocking, so start a new goroutine - go func() { - time.Sleep(30 * time.Second) - loggerWasSetLock.Lock() - defer loggerWasSetLock.Unlock() - if !loggerWasSet { - dlog.Fulfill(NullLogSink{}) +func eventuallyFulfillRoot() { + if logFullfilled.Load() { + return + } + if time.Since(rootLogCreated).Seconds() >= 30 { + if logFullfilled.CompareAndSwap(false, true) { + fmt.Fprintf(os.Stderr, "[controller-runtime] log.SetLogger(...) was never called, logs will not be displayed:\n%s", debug.Stack()) + SetLogger(logr.New(NullLogSink{})) } - }() + } } var ( - loggerWasSetLock sync.Mutex - loggerWasSet bool + logFullfilled atomic.Bool ) // Log is the base logger used by kubebuilder. It delegates @@ -80,8 +72,10 @@ var ( // the first 30 seconds of a binaries lifetime, it will get // set to a NullLogSink. var ( - dlog = NewDelegatingLogSink(NullLogSink{}) - Log = logr.New(dlog) + rootLog, rootLogCreated = func() (*delegatingLogSink, time.Time) { + return newDelegatingLogSink(NullLogSink{}), time.Now() + }() + Log = logr.New(rootLog) ) // FromContext returns a logger with predefined values from a context.Context. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index 5ccff8b78..f298229e5 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -18,11 +18,11 @@ package manager import ( "context" - "crypto/tls" "errors" "fmt" "net" "net/http" + "net/http/pprof" "sync" "sync/atomic" "time" @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -41,12 +40,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder" "sigs.k8s.io/controller-runtime/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -107,8 +105,11 @@ type controllerManager struct { // Healthz probe handler healthzHandler *healthz.Handler - // controllerOptions are the global controller options. - controllerOptions v1alpha1.ControllerConfigurationSpec + // pprofListener is used to serve pprof + pprofListener net.Listener + + // controllerConfig are the global controller options. + controllerConfig config.Controller // Logger is the logger that should be used by this manager. // If none is set, it defaults to log.Log global logger. @@ -128,18 +129,7 @@ type controllerManager struct { // election was configured. elected chan struct{} - // port is the port that the webhook server serves at. - port int - // host is the hostname that the webhook server binds to. - host string - // CertDir is the directory that contains the server key and certificate. - // if not set, webhook server would look up the server key and certificate in - // {TempDir}/k8s-webhook-server/serving-certs - certDir string - // tlsOpts is used to allow configuring the TLS config used for the webhook server. 
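The log package hunks above drop the 30-second background goroutine in favour of eventuallyFulfillRoot(), which warns on stderr if SetLogger was never called. A short sketch of setting the logger early in main; the zap helper is one common choice and is assumed here, any logr.Logger works:

// Sketch only: set a real logger before constructing managers or controllers,
// otherwise the vendored code above eventually prints a
// "log.SetLogger(...) was never called" warning and discards log output.
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
	// ... build the manager and controllers after the logger is set ...
}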
- tlsOpts []func(*tls.Config) - - webhookServer *webhook.Server + webhookServer webhook.Server // webhookServerOnce will be called in GetWebhookServer() to optionally initialize // webhookServer if unset, and Add() it to controllerManager. webhookServerOnce sync.Once @@ -191,31 +181,9 @@ func (cm *controllerManager) Add(r Runnable) error { } func (cm *controllerManager) add(r Runnable) error { - // Set dependencies on the object - if err := cm.SetFields(r); err != nil { - return err - } return cm.runnables.Add(r) } -// Deprecated: use the equivalent Options field to set a field. This method will be removed in v0.10. -func (cm *controllerManager) SetFields(i interface{}) error { - if err := cm.cluster.SetFields(i); err != nil { - return err - } - if _, err := inject.InjectorInto(cm.SetFields, i); err != nil { - return err - } - if _, err := inject.StopChannelInto(cm.internalProceduresStop, i); err != nil { - return err - } - if _, err := inject.LoggerInto(cm.logger, i); err != nil { - return err - } - - return nil -} - // AddMetricsExtraHandler adds extra handler served on path to the http server that serves metrics. func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Handler) error { cm.Lock() @@ -272,6 +240,10 @@ func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) return nil } +func (cm *controllerManager) GetHTTPClient() *http.Client { + return cm.cluster.GetHTTPClient() +} + func (cm *controllerManager) GetConfig() *rest.Config { return cm.cluster.GetConfig() } @@ -304,15 +276,10 @@ func (cm *controllerManager) GetAPIReader() client.Reader { return cm.cluster.GetAPIReader() } -func (cm *controllerManager) GetWebhookServer() *webhook.Server { +func (cm *controllerManager) GetWebhookServer() webhook.Server { cm.webhookServerOnce.Do(func() { if cm.webhookServer == nil { - cm.webhookServer = &webhook.Server{ - Port: cm.port, - Host: cm.host, - CertDir: cm.certDir, - TLSOpts: cm.tlsOpts, - } + panic("webhook should not be nil") } if err := cm.Add(cm.webhookServer); err != nil { panic(fmt.Sprintf("unable to add webhook server to the controller manager: %s", err)) @@ -325,23 +292,29 @@ func (cm *controllerManager) GetLogger() logr.Logger { return cm.logger } -func (cm *controllerManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { - return cm.controllerOptions +func (cm *controllerManager) GetControllerOptions() config.Controller { + return cm.controllerConfig } -func (cm *controllerManager) serveMetrics() { +func (cm *controllerManager) addMetricsServer() error { + mux := http.NewServeMux() + srv := httpserver.New(mux) + handler := promhttp.HandlerFor(metrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, }) // TODO(JoelSpeed): Use existing Kubernetes machinery for serving metrics - mux := http.NewServeMux() mux.Handle(defaultMetricsEndpoint, handler) for path, extraHandler := range cm.metricsExtraHandlers { mux.Handle(path, extraHandler) } - server := httpserver.New(mux) - go cm.httpServe("metrics", cm.logger.WithValues("path", defaultMetricsEndpoint), server, cm.metricsListener) + return cm.add(&server{ + Kind: "metrics", + Log: cm.logger.WithValues("path", defaultMetricsEndpoint), + Server: srv, + Listener: cm.metricsListener, + }) } func (cm *controllerManager) serveHealthProbes() { @@ -362,6 +335,24 @@ func (cm *controllerManager) serveHealthProbes() { go cm.httpServe("health probe", cm.logger, server, cm.healthProbeListener) } +func (cm *controllerManager) addPprofServer() error { + mux := 
http.NewServeMux() + srv := httpserver.New(mux) + + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + return cm.add(&server{ + Kind: "pprof", + Log: cm.logger, + Server: srv, + Listener: cm.pprofListener, + }) +} + func (cm *controllerManager) httpServe(kind string, log logr.Logger, server *http.Server, ln net.Listener) { log = log.WithValues("kind", kind, "addr", ln.Addr()) @@ -451,7 +442,9 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // (If we don't serve metrics for non-leaders, prometheus will still scrape // the pod but will get a connection refused). if cm.metricsListener != nil { - cm.serveMetrics() + if err := cm.addMetricsServer(); err != nil { + return fmt.Errorf("failed to add metrics server: %w", err) + } } // Serve health probes. @@ -459,6 +452,13 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { cm.serveHealthProbes() } + // Add pprof server + if cm.pprofListener != nil { + if err := cm.addPprofServer(); err != nil { + return fmt.Errorf("failed to add pprof server: %w", err) + } + } + // First start any webhook servers, which includes conversion, validation, and defaulting // webhooks that are registered. // @@ -466,22 +466,22 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // between conversion webhooks and the cache sync (usually initial list) which causes the webhooks // to never start because no cache can be populated. if err := cm.runnables.Webhooks.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start webhooks: %w", err) } } // Start and wait for caches. if err := cm.runnables.Caches.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start caches: %w", err) } } // Start the non-leaderelection Runnables after the cache has synced. if err := cm.runnables.Others.Start(cm.internalCtx); err != nil { - if !errors.Is(err, wait.ErrWaitTimeout) { - return err + if err != nil { + return fmt.Errorf("failed to start other runnables: %w", err) } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 2facb1c91..7e65ef0c3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -33,6 +33,7 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" @@ -44,7 +45,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/recorder" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook" ) @@ -55,8 +55,7 @@ type Manager interface { cluster.Cluster // Add will set requested dependencies on the component, and cause the component to be - // started when Start is called. Add will inject any dependencies for which the argument - // implements the inject interface - e.g. inject.Client. + // started when Start is called. 
// Depending on if a Runnable implements LeaderElectionRunnable interface, a Runnable can be run in either // non-leaderelection mode (always running) or leader election mode (managed by leader election if enabled). Add(Runnable) error @@ -88,13 +87,13 @@ type Manager interface { Start(ctx context.Context) error // GetWebhookServer returns a webhook.Server - GetWebhookServer() *webhook.Server + GetWebhookServer() webhook.Server // GetLogger returns this manager's logger. GetLogger() logr.Logger // GetControllerOptions returns controller global configuration options. - GetControllerOptions() v1alpha1.ControllerConfigurationSpec + GetControllerOptions() config.Controller } // Options are the arguments for creating a new Manager. @@ -102,10 +101,44 @@ type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources. // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better // to pass your own scheme in. See the documentation in pkg/scheme for more information. + // + // If set, the Scheme will be used to create the default Client and Cache. Scheme *runtime.Scheme - // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs - MapperProvider func(c *rest.Config) (meta.RESTMapper, error) + // MapperProvider provides the rest mapper used to map go types to Kubernetes APIs. + // + // If set, the RESTMapper returned by this function is used to create the RESTMapper + // used by the Client and Cache. + MapperProvider func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) + + // Cache is the cache.Options that will be used to create the default Cache. + // By default, the cache will watch and list requested objects in all namespaces. + Cache cache.Options + + // NewCache is the function that will create the cache to be used + // by the manager. If not set this will use the default new cache function. + // + // When using a custom NewCache, the Cache options will be passed to the + // NewCache function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewCache if you know what you are doing. + NewCache cache.NewCacheFunc + + // Client is the client.Options that will be used to create the default Client. + // By default, the client will use the cache for reads and direct calls for writes. + Client client.Options + + // NewClient is the func that creates the client to be used by the manager. + // If not set this will create a Client backed by a Cache for read operations + // and a direct Client for write operations. + // + // When using a custom NewClient, the Client options will be passed to the + // NewClient function. + // + // NOTE: LOW LEVEL PRIMITIVE! + // Only use a custom NewClient if you know what you are doing. + NewClient client.NewClientFunc // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -132,6 +165,8 @@ type Options struct { // is "done" with an object, and would otherwise not requeue it, i.e., we // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`, // instead of `reconcile.Result{}`. + // + // Deprecated: Use Cache.SyncPeriod instead. SyncPeriod *time.Duration // Logger is the logger that should be used by this manager. @@ -217,6 +252,8 @@ type Options struct { // Note: If a namespace is specified, controllers can still Watch for a // cluster-scoped resource (e.g Node). 
For namespaced resources, the cache // will only hold objects from the desired namespace. + // + // Deprecated: Use Cache.Namespaces instead. Namespace string // MetricsBindAddress is the TCP address that the controller should bind to @@ -235,11 +272,22 @@ type Options struct { // Liveness probe endpoint name, defaults to "healthz" LivenessEndpointName string + // PprofBindAddress is the TCP address that the controller should bind to + // for serving pprof. + // It can be set to "" or "0" to disable the pprof serving. + // Since pprof may contain sensitive information, make sure to protect it + // before exposing it to public. + PprofBindAddress string + // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Port int // Host is the hostname that the webhook server binds to. // It is used to set webhook.Server.Host if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. Host string // CertDir is the directory that contains the server key and certificate. @@ -247,26 +295,19 @@ type Options struct { // {TempDir}/k8s-webhook-server/serving-certs. The server key and certificate // must be named tls.key and tls.crt, respectively. // It is used to set webhook.Server.CertDir if WebhookServer is not set. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. CertDir string // TLSOpts is used to allow configuring the TLS config used for the webhook server. + // + // Deprecated: Use WebhookServer instead. A WebhookServer can be created via webhook.NewServer. TLSOpts []func(*tls.Config) // WebhookServer is an externally configured webhook.Server. By default, // a Manager will create a default server using Port, Host, and CertDir; // if this is set, the Manager will use this server instead. - WebhookServer *webhook.Server - - // Functions to allow for a user to customize values that will be injected. - - // NewCache is the function that will create the cache to be used - // by the manager. If not set this will use the default new cache function. - NewCache cache.NewCacheFunc - - // NewClient is the func that creates the client to be used by the manager. - // If not set this will create the default DelegatingClient that will - // use the cache for reads and the client for writes. - NewClient cluster.NewClientFunc + WebhookServer webhook.Server // BaseContext is the function that provides Context values to Runnables // managed by the Manager. If a BaseContext function isn't provided, Runnables @@ -275,10 +316,14 @@ type Options struct { // ClientDisableCacheFor tells the client that, if any cache is used, to bypass it // for the given objects. + // + // Deprecated: Use Client.Cache.DisableCacheFor instead. ClientDisableCacheFor []client.Object // DryRunClient specifies whether the client should be configured to enforce // dryRun mode. + // + // Deprecated: Use Client.DryRun instead. DryRunClient bool // EventBroadcaster records Events emitted by the manager and sends them to the Kubernetes API @@ -297,7 +342,7 @@ type Options struct { // Controller contains global configuration options for controllers // registered within this manager. 
// +optional - Controller v1alpha1.ControllerConfigurationSpec + Controller config.Controller // makeBroadcaster allows deferring the creation of the broadcaster to // avoid leaking goroutines if we never call Start on this manager. It also @@ -306,10 +351,11 @@ type Options struct { makeBroadcaster intrec.EventBroadcasterProducer // Dependency injection for testing - newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) + newRecorderProvider func(config *rest.Config, httpClient *http.Client, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) newResourceLock func(config *rest.Config, recorderProvider recorder.Provider, options leaderelection.Options) (resourcelock.Interface, error) newMetricsListener func(addr string) (net.Listener, error) newHealthProbeListener func(addr string) (net.Listener, error) + newPprofListener func(addr string) (net.Listener, error) } // BaseContextFunc is a function used to provide a base Context to Runnables @@ -353,12 +399,14 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.MapperProvider = options.MapperProvider clusterOptions.Logger = options.Logger clusterOptions.SyncPeriod = options.SyncPeriod - clusterOptions.Namespace = options.Namespace clusterOptions.NewCache = options.NewCache clusterOptions.NewClient = options.NewClient - clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor - clusterOptions.DryRunClient = options.DryRunClient - clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck + clusterOptions.Cache = options.Cache + clusterOptions.Client = options.Client + clusterOptions.Namespace = options.Namespace //nolint:staticcheck + clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor //nolint:staticcheck + clusterOptions.DryRunClient = options.DryRunClient //nolint:staticcheck + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err @@ -367,7 +415,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // Create the recorder provider to inject event recorders for the components. // TODO(directxman12): the log for the event provider should have a context (name, tags, etc) specific // to the particular controller that it's being injected into, rather than a generic one like is here. - recorderProvider, err := options.newRecorderProvider(config, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + recorderProvider, err := options.newRecorderProvider(config, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -381,7 +429,7 @@ func New(config *rest.Config, options Options) (Manager, error) { leaderRecorderProvider = recorderProvider } else { leaderConfig = rest.CopyConfig(options.LeaderElectionConfig) - leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) + leaderRecorderProvider, err = options.newRecorderProvider(leaderConfig, cluster.GetHTTPClient(), cluster.GetScheme(), options.Logger.WithName("events"), options.makeBroadcaster) if err != nil { return nil, err } @@ -419,6 +467,13 @@ func New(config *rest.Config, options Options) (Manager, error) { return nil, err } + // Create pprof listener. 
This will throw an error if the bind + // address is invalid or already in use. + pprofListener, err := options.newPprofListener(options.PprofBindAddress) + if err != nil { + return nil, fmt.Errorf("failed to new pprof listener: %w", err) + } + errChan := make(chan error) runnables := newRunnables(options.BaseContext, errChan) @@ -431,13 +486,9 @@ func New(config *rest.Config, options Options) (Manager, error) { resourceLock: resourceLock, metricsListener: metricsListener, metricsExtraHandlers: metricsExtraHandlers, - controllerOptions: options.Controller, + controllerConfig: options.Controller, logger: options.Logger, elected: make(chan struct{}), - port: options.Port, - host: options.Host, - certDir: options.CertDir, - tlsOpts: options.TLSOpts, webhookServer: options.WebhookServer, leaderElectionID: options.LeaderElectionID, leaseDuration: *options.LeaseDuration, @@ -446,6 +497,7 @@ func New(config *rest.Config, options Options) (Manager, error) { healthProbeListener: healthProbeListener, readinessEndpointName: options.ReadinessEndpointName, livenessEndpointName: options.LivenessEndpointName, + pprofListener: pprofListener, gracefulShutdownTimeout: *options.GracefulShutdownTimeout, internalProceduresStop: make(chan struct{}), leaderElectionStopped: make(chan struct{}), @@ -456,14 +508,14 @@ func New(config *rest.Config, options Options) (Manager, error) { // AndFrom will use a supplied type and convert to Options // any options already set on Options will be ignored, this is used to allow // cli flags to override anything specified in the config file. +// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { - if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { - err := inj.InjectScheme(o.Scheme) - if err != nil { - return o, err - } - } - newObj, err := loader.Complete() if err != nil { return o, err @@ -498,18 +550,23 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, if o.Port == 0 && newObj.Webhook.Port != nil { o.Port = *newObj.Webhook.Port } - if o.Host == "" && newObj.Webhook.Host != "" { o.Host = newObj.Webhook.Host } - if o.CertDir == "" && newObj.Webhook.CertDir != "" { o.CertDir = newObj.Webhook.CertDir } + if o.WebhookServer == nil { + o.WebhookServer = webhook.NewServer(webhook.Options{ + Port: o.Port, + Host: o.Host, + CertDir: o.CertDir, + }) + } if newObj.Controller != nil { - if o.Controller.CacheSyncTimeout == nil && newObj.Controller.CacheSyncTimeout != nil { - o.Controller.CacheSyncTimeout = newObj.Controller.CacheSyncTimeout + if o.Controller.CacheSyncTimeout == 0 && newObj.Controller.CacheSyncTimeout != nil { + o.Controller.CacheSyncTimeout = *newObj.Controller.CacheSyncTimeout } if len(o.Controller.GroupKindConcurrency) == 0 && len(newObj.Controller.GroupKindConcurrency) > 0 { @@ -521,6 +578,13 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, } // AndFromOrDie will use options.AndFrom() and will panic if there are errors. 
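The manager.Options hunks above deprecate the loose SyncPeriod/Namespace/Port/Host/CertDir fields in favour of explicit cache.Options, client.Options and a webhook.Server built via webhook.NewServer, and add a PprofBindAddress. A rough sketch of constructing a manager against these vendored options; the scheme variable, ports and bind addresses are placeholders, not values taken from this patch:

// Sketch only: wires the new-style options; values are illustrative.
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

// newManager routes webhook settings through webhook.NewServer and cache tuning
// through cache.Options, as the updated vendored API expects.
func newManager(scheme *runtime.Scheme) (manager.Manager, error) {
	syncPeriod := 10 * time.Hour

	return ctrl.NewManager(ctrl.GetConfigOrDie(), manager.Options{
		Scheme: scheme,
		Cache: cache.Options{
			SyncPeriod: &syncPeriod, // replaces the deprecated Options.SyncPeriod
		},
		WebhookServer: webhook.NewServer(webhook.Options{
			Port: 9443, // replaces the deprecated Options.Port/Host/CertDir
		}),
		MetricsBindAddress: ":8080",
		PprofBindAddress:   ":8082", // new in this version; "" or "0" disables pprof
	})
}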
+// +// Deprecated: This function has been deprecated and will be removed in a future release, +// The Component Configuration package has been unmaintained for over a year and is no longer +// actively developed. Users should migrate to their own configuration format +// and configure Manager.Options directly. +// See https://github.com/kubernetes-sigs/controller-runtime/issues/895 +// for more information, feedback, and comments. func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { o, err := o.AndFrom(loader) if err != nil { @@ -579,6 +643,19 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) { return ln, nil } +// defaultPprofListener creates the default pprof listener bound to the given address. +func defaultPprofListener(addr string) (net.Listener, error) { + if addr == "" || addr == "0" { + return nil, nil + } + + ln, err := net.Listen("tcp", addr) + if err != nil { + return nil, fmt.Errorf("error listening on %s: %w", addr, err) + } + return ln, nil +} + // defaultBaseContext is used as the BaseContext value in Options if one // has not already been set. func defaultBaseContext() context.Context { @@ -639,6 +716,10 @@ func setOptionsDefaults(options Options) Options { options.newHealthProbeListener = defaultHealthProbeListener } + if options.newPprofListener == nil { + options.newPprofListener = defaultPprofListener + } + if options.GracefulShutdownTimeout == nil { gracefulShutdownTimeout := defaultGracefulShutdownPeriod options.GracefulShutdownTimeout = &gracefulShutdownTimeout @@ -652,5 +733,14 @@ func setOptionsDefaults(options Options) Options { options.BaseContext = defaultBaseContext } + if options.WebhookServer == nil { + options.WebhookServer = webhook.NewServer(webhook.Options{ + Host: options.Host, + Port: options.Port, + CertDir: options.CertDir, + TLSOpts: options.TLSOpts, + }) + } + return options } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go index f7b91a209..549741e6e 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go @@ -56,7 +56,7 @@ func (r *runnables) Add(fn Runnable) error { return r.Caches.Add(fn, func(ctx context.Context) bool { return runnable.GetCache().WaitForCacheSync(ctx) }) - case *webhook.Server: + case webhook.Server: return r.Webhooks.Add(fn, nil) case LeaderElectionRunnable: if !runnable.NeedLeaderElection() { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go new file mode 100644 index 000000000..b6509f48f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package manager + +import ( + "context" + "errors" + "net" + "net/http" + + "github.com/go-logr/logr" +) + +// server is a general purpose HTTP server Runnable for a manager +// to serve some internal handlers such as health probes, metrics and profiling. +type server struct { + Kind string + Log logr.Logger + Server *http.Server + Listener net.Listener +} + +func (s *server) Start(ctx context.Context) error { + log := s.Log.WithValues("kind", s.Kind, "addr", s.Listener.Addr()) + + serverShutdown := make(chan struct{}) + go func() { + <-ctx.Done() + log.Info("shutting down server") + if err := s.Server.Shutdown(context.Background()); err != nil { + log.Error(err, "error shutting down server") + } + close(serverShutdown) + }() + + log.Info("starting server") + if err := s.Server.Serve(s.Listener); err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + <-serverShutdown + return nil +} + +func (s *server) NeedLeaderElection() bool { + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index a8b43ea0a..ff28998c4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -18,8 +18,6 @@ package metrics import ( "context" - "net/url" - "time" "github.com/prometheus/client_golang/prometheus" clientmetrics "k8s.io/client-go/tools/metrics" @@ -29,70 +27,9 @@ import ( // that client-go registers metrics. We copy the names and formats // from Kubernetes so that we match the core controllers. -// Metrics subsystem and all of the keys used by the rest client. -const ( - RestClientSubsystem = "rest_client" - LatencyKey = "request_latency_seconds" - ResultKey = "requests_total" -) - var ( // client metrics. - // RequestLatency reports the request latency in seconds per verb/URL. - // Deprecated: This metric is deprecated for removal in a future release: using the URL as a - // dimension results in cardinality explosion for some consumers. It was deprecated upstream - // in k8s v1.14 and hidden in v1.17 via https://github.com/kubernetes/kubernetes/pull/83836. - // It is not registered by default. To register: - // import ( - // clientmetrics "k8s.io/client-go/tools/metrics" - // clmetrics "sigs.k8s.io/controller-runtime/metrics" - // ) - // - // func init() { - // clmetrics.Registry.MustRegister(clmetrics.RequestLatency) - // clientmetrics.Register(clientmetrics.RegisterOpts{ - // RequestLatency: clmetrics.LatencyAdapter - // }) - // } - RequestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Subsystem: RestClientSubsystem, - Name: LatencyKey, - Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), - }, []string{"verb", "url"}) - - // requestLatency is a Prometheus Histogram metric type partitioned by - // "verb", and "host" labels. It is used for the rest client latency metrics. - requestLatency = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_duration_seconds", - Help: "Request latency in seconds. Broken down by verb, and host.", - Buckets: []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 15.0, 30.0, 60.0}, - }, - []string{"verb", "host"}, - ) - - requestSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_request_size_bytes", - Help: "Request size in bytes. 
Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - - responseSize = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "rest_client_response_size_bytes", - Help: "Response size in bytes. Broken down by verb and host.", - // 64 bytes to 16MB - Buckets: []float64{64, 256, 512, 1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216}, - }, - []string{"verb", "host"}, - ) - requestResult = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "rest_client_requests_total", @@ -109,17 +46,11 @@ func init() { // registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry - Registry.MustRegister(requestLatency) - Registry.MustRegister(requestSize) - Registry.MustRegister(responseSize) Registry.MustRegister(requestResult) // register the metrics with client-go clientmetrics.Register(clientmetrics.RegisterOpts{ - RequestLatency: &LatencyAdapter{metric: requestLatency}, - RequestSize: &sizeAdapter{metric: requestSize}, - ResponseSize: &sizeAdapter{metric: responseSize}, - RequestResult: &resultAdapter{metric: requestResult}, + RequestResult: &resultAdapter{metric: requestResult}, }) } @@ -131,24 +62,6 @@ func registerClientMetrics() { // copied (more-or-less directly) from k8s.io/kubernetes setup code // (which isn't anywhere in an easily-importable place). -// LatencyAdapter implements LatencyMetric. -type LatencyAdapter struct { - metric *prometheus.HistogramVec -} - -// Observe increments the request latency metric for the given verb/URL. -func (l *LatencyAdapter) Observe(_ context.Context, verb string, u url.URL, latency time.Duration) { - l.metric.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) -} - -type sizeAdapter struct { - metric *prometheus.HistogramVec -} - -func (s *sizeAdapter) Observe(ctx context.Context, verb string, host string, size float64) { - s.metric.WithLabelValues(verb, host).Observe(size) -} - type resultAdapter struct { metric *prometheus.CounterVec } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index 8b0f3634e..314635875 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") @@ -242,15 +241,6 @@ type and struct { predicates []Predicate } -func (a and) InjectFunc(f inject.Func) error { - for _, p := range a.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (a and) Create(e event.CreateEvent) bool { for _, p := range a.predicates { if !p.Create(e) { @@ -296,15 +286,6 @@ type or struct { predicates []Predicate } -func (o or) InjectFunc(f inject.Func) error { - for _, p := range o.predicates { - if err := f(p); err != nil { - return err - } - } - return nil -} - func (o or) Create(e event.CreateEvent) bool { for _, p := range o.predicates { if p.Create(e) { @@ -350,10 +331,6 @@ type not struct { predicate Predicate } -func (n not) InjectFunc(f inject.Func) error { - return f(n.predicate) -} - func (n 
not) Create(e event.CreateEvent) bool { return !n.predicate.Create(e) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go index 8285e2ca9..d51cfc34a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -18,6 +18,7 @@ package reconcile import ( "context" + "errors" "time" "k8s.io/apimachinery/pkg/types" @@ -100,3 +101,26 @@ var _ Reconciler = Func(nil) // Reconcile implements Reconciler. func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) } + +// TerminalError is an error that will not be retried but still be logged +// and recorded in metrics. +func TerminalError(wrapped error) error { + return &terminalError{err: wrapped} +} + +type terminalError struct { + err error +} + +func (te *terminalError) Unwrap() error { + return te.err +} + +func (te *terminalError) Error() string { + return "terminal error: " + te.err.Error() +} + +func (te *terminalError) Is(target error) bool { + tp := &terminalError{} + return errors.As(target, &tp) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go deleted file mode 100644 index 17c60895f..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to -the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate -objects which implement the Injectable interfaces. -*/ -package inject diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go deleted file mode 100644 index c8c56ba81..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconciles. -// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10. 
-package inject - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles. -type Cache interface { - InjectCache(cache cache.Cache) error -} - -// CacheInto will set informers on i and return the result if it implements Cache. Returns -// false if i does not implement Cache. -func CacheInto(c cache.Cache, i interface{}) (bool, error) { - if s, ok := i.(Cache); ok { - return true, s.InjectCache(c) - } - return false, nil -} - -// APIReader is used by the Manager to inject the APIReader into necessary types. -type APIReader interface { - InjectAPIReader(client.Reader) error -} - -// APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. -// Returns false if i does not implement APIReader. -func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { - if s, ok := i.(APIReader); ok { - return true, s.InjectAPIReader(reader) - } - return false, nil -} - -// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles. -type Config interface { - InjectConfig(*rest.Config) error -} - -// ConfigInto will set config on i and return the result if it implements Config. Returns -// false if i does not implement Config. -func ConfigInto(config *rest.Config, i interface{}) (bool, error) { - if s, ok := i.(Config); ok { - return true, s.InjectConfig(config) - } - return false, nil -} - -// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles. -type Client interface { - InjectClient(client.Client) error -} - -// ClientInto will set client on i and return the result if it implements Client. Returns -// false if i does not implement Client. -func ClientInto(client client.Client, i interface{}) (bool, error) { - if s, ok := i.(Client); ok { - return true, s.InjectClient(client) - } - return false, nil -} - -// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles. -type Scheme interface { - InjectScheme(scheme *runtime.Scheme) error -} - -// SchemeInto will set scheme and return the result on i if it implements Scheme. Returns -// false if i does not implement Scheme. -func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) { - if is, ok := i.(Scheme); ok { - return true, is.InjectScheme(scheme) - } - return false, nil -} - -// Stoppable is used by the ControllerManager to inject stop channel into Sources, -// EventHandlers, Predicates, and Reconciles. -type Stoppable interface { - InjectStopChannel(<-chan struct{}) error -} - -// StopChannelInto will set stop channel on i and return the result if it implements Stoppable. -// Returns false if i does not implement Stoppable. -func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { - if s, ok := i.(Stoppable); ok { - return true, s.InjectStopChannel(stop) - } - return false, nil -} - -// Mapper is used to inject the rest mapper to components that may need it. -type Mapper interface { - InjectMapper(meta.RESTMapper) error -} - -// MapperInto will set the rest mapper on i and return the result if it implements Mapper. -// Returns false if i does not implement Mapper. 
-func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { - if m, ok := i.(Mapper); ok { - return true, m.InjectMapper(mapper) - } - return false, nil -} - -// Func injects dependencies into i. -type Func func(i interface{}) error - -// Injector is used by the ControllerManager to inject Func into Controllers. -type Injector interface { - InjectFunc(f Func) error -} - -// InjectorInto will set f and return the result on i if it implements Injector. Returns -// false if i does not implement Injector. -func InjectorInto(f Func, i interface{}) (bool, error) { - if ii, ok := i.(Injector); ok { - return true, ii.InjectFunc(f) - } - return false, nil -} - -// Logger is used to inject Loggers into components that need them -// and don't otherwise have opinions. -type Logger interface { - InjectLogger(l logr.Logger) error -} - -// LoggerInto will set the logger on the given object if it implements inject.Logger, -// returning true if a InjectLogger was called, and false otherwise. -func LoggerInto(l logr.Logger, i interface{}) (bool, error) { - if injectable, wantsLogger := i.(Logger); wantsLogger { - return true, injectable.InjectLogger(l) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index 6b6756392..099c8d68f 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -18,28 +18,19 @@ package source import ( "context" - "errors" "fmt" "sync" - "time" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" - "sigs.k8s.io/controller-runtime/pkg/source/internal" + internal "sigs.k8s.io/controller-runtime/pkg/internal/source" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -var log = logf.RuntimeLog.WithName("source") - const ( // defaultBufferSize is the default number of event notifications that can be buffered. defaultBufferSize = 1024 @@ -52,8 +43,7 @@ const ( // // * Use Channel for events originating outside the cluster (eh.g. GitHub Webhook callback, Polling external urls). // -// Users may build their own Source implementations. If their implementations implement any of the inject package -// interfaces, the dependencies will be injected by the Controller when Watch is called. +// Users may build their own Source implementations. type Source interface { // Start is internal and should be called only by the Controller to register an EventHandler with the Informer // to enqueue reconcile.Requests. @@ -67,144 +57,9 @@ type SyncingSource interface { WaitForSync(ctx context.Context) error } -// NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used -// and not overwritten. It can be used to watch objects in a different cluster by passing the cache -// from that other cluster. 
-func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { - return &kindWithCache{kind: Kind{Type: object, cache: cache}} -} - -type kindWithCache struct { - kind Kind -} - -func (ks *kindWithCache) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - return ks.kind.Start(ctx, handler, queue, prct...) -} - -func (ks *kindWithCache) String() string { - return ks.kind.String() -} - -func (ks *kindWithCache) WaitForSync(ctx context.Context) error { - return ks.kind.WaitForSync(ctx) -} - -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). -type Kind struct { - // Type is the type of object to watch. e.g. &v1.Pod{} - Type client.Object - - // cache used to watch APIs - cache cache.Cache - - // started may contain an error if one was encountered during startup. If its closed and does not - // contain an error, startup and syncing finished. - started chan error - startCancel func() -} - -var _ SyncingSource = &Kind{} - -// Start is internal and should be called only by the Controller to register an EventHandler with the Informer -// to enqueue reconcile.Requests. -func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, - prct ...predicate.Predicate) error { - // Type should have been specified by the user. - if ks.Type == nil { - return fmt.Errorf("must specify Kind.Type") - } - - // cache should have been injected before Start was called - if ks.cache == nil { - return fmt.Errorf("must call CacheInto on Kind before calling Start") - } - - // cache.GetInformer will block until its context is cancelled if the cache was already started and it can not - // sync that informer (most commonly due to RBAC issues). - ctx, ks.startCancel = context.WithCancel(ctx) - ks.started = make(chan error) - go func() { - var ( - i cache.Informer - lastErr error - ) - - // Tries to get an informer until it returns true, - // an error or the specified context is cancelled or expired. - if err := wait.PollImmediateUntilWithContext(ctx, 10*time.Second, func(ctx context.Context) (bool, error) { - // Lookup the Informer from the Cache and add an EventHandler which populates the Queue - i, lastErr = ks.cache.GetInformer(ctx, ks.Type) - if lastErr != nil { - kindMatchErr := &meta.NoKindMatchError{} - switch { - case errors.As(lastErr, &kindMatchErr): - log.Error(lastErr, "if kind is a CRD, it should be installed before calling Start", - "kind", kindMatchErr.GroupKind) - case runtime.IsNotRegisteredError(lastErr): - log.Error(lastErr, "kind must be registered to the Scheme") - default: - log.Error(lastErr, "failed to get informer from cache") - } - return false, nil // Retry. 
- } - return true, nil - }); err != nil { - if lastErr != nil { - ks.started <- fmt.Errorf("failed to get informer from cache: %w", lastErr) - return - } - ks.started <- err - return - } - - _, err := i.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) - if err != nil { - ks.started <- err - return - } - if !ks.cache.WaitForCacheSync(ctx) { - // Would be great to return something more informative here - ks.started <- errors.New("cache did not sync") - } - close(ks.started) - }() - - return nil -} - -func (ks *Kind) String() string { - if ks.Type != nil { - return fmt.Sprintf("kind source: %T", ks.Type) - } - return "kind source: unknown type" -} - -// WaitForSync implements SyncingSource to allow controllers to wait with starting -// workers until the cache is synced. -func (ks *Kind) WaitForSync(ctx context.Context) error { - select { - case err := <-ks.started: - return err - case <-ctx.Done(): - ks.startCancel() - if errors.Is(ctx.Err(), context.Canceled) { - return nil - } - return errors.New("timed out waiting for cache to be synced") - } -} - -var _ inject.Cache = &Kind{} - -// InjectCache is internal should be called only by the Controller. InjectCache is used to inject -// the Cache dependency initialized by the ControllerManager. -func (ks *Kind) InjectCache(c cache.Cache) error { - if ks.cache == nil { - ks.cache = c - } - return nil +// Kind creates a KindSource with the given cache provider. +func Kind(cache cache.Cache, object client.Object) SyncingSource { + return &internal.Kind{Type: object, Cache: cache} } var _ Source = &Channel{} @@ -219,9 +74,6 @@ type Channel struct { // Source is the source channel to fetch GenericEvents Source <-chan event.GenericEvent - // stop is to end ongoing goroutine, and close the channels - stop <-chan struct{} - // dest is the destination channels of the added event handlers dest []chan event.GenericEvent @@ -237,18 +89,6 @@ func (cs *Channel) String() string { return fmt.Sprintf("channel source: %p", cs) } -var _ inject.Stoppable = &Channel{} - -// InjectStopChannel is internal should be called only by the Controller. -// It is used to inject the stop channel initialized by the ControllerManager. -func (cs *Channel) InjectStopChannel(stop <-chan struct{}) error { - if cs.stop == nil { - cs.stop = stop - } - - return nil -} - // Start implements Source and should only be called by the Controller. 
func (cs *Channel) Start( ctx context.Context, @@ -260,11 +100,6 @@ func (cs *Channel) Start( return fmt.Errorf("must specify Channel.Source") } - // stop should have been injected before Start was called - if cs.stop == nil { - return fmt.Errorf("must call InjectStop on Channel before calling Start") - } - // use default value if DestBufferSize not specified if cs.DestBufferSize == 0 { cs.DestBufferSize = defaultBufferSize @@ -292,7 +127,11 @@ func (cs *Channel) Start( } if shouldHandle { - handler.Generic(evt, queue) + func() { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + handler.Generic(ctx, evt, queue) + }() } } }() @@ -359,7 +198,7 @@ func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, que return fmt.Errorf("must specify Informer.Informer") } - _, err := is.Informer.AddEventHandler(internal.EventHandler{Queue: queue, EventHandler: handler, Predicates: prct}) + _, err := is.Informer.AddEventHandler(internal.NewEventHandler(ctx, queue, handler, prct).HandlerFuncs()) if err != nil { return err } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index c7cb71b75..f14f130f7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -19,7 +19,6 @@ package admission import ( "fmt" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/json" @@ -32,8 +31,11 @@ type Decoder struct { } // NewDecoder creates a Decoder given the runtime.Scheme. -func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) { - return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil +func NewDecoder(scheme *runtime.Scheme) *Decoder { + if scheme == nil { + panic("scheme should never be nil") + } + return &Decoder{codecs: serializer.NewCodecFactory(scheme)} } // Decode decodes the inlined object in the AdmissionRequest into the passed-in runtime.Object. @@ -62,9 +64,13 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er if len(rawObj.Raw) == 0 { return fmt.Errorf("there is no content to decode") } - if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured { + if unstructuredInto, isUnstructured := into.(runtime.Unstructured); isUnstructured { // unmarshal into unstructured's underlying object to avoid calling the decoder - return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object) + var object map[string]interface{} + if err := json.Unmarshal(rawObj.Raw, &object); err != nil { + return err + } + unstructuredInto.SetUnstructuredContent(object) } deserializer := d.codecs.UniversalDeserializer() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go index e4e0778f5..a3b720716 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go @@ -33,9 +33,9 @@ type Defaulter interface { } // DefaultingWebhookFor creates a new Webhook for Defaulting the provided type. 
-func DefaultingWebhookFor(defaulter Defaulter) *Webhook { +func DefaultingWebhookFor(scheme *runtime.Scheme, defaulter Defaulter) *Webhook { return &Webhook{ - Handler: &mutatingHandler{defaulter: defaulter}, + Handler: &mutatingHandler{defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -44,16 +44,11 @@ type mutatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &mutatingHandler{} - -// InjectDecoder injects the decoder into a mutatingHandler. -func (h *mutatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go index 700798424..5f697e7dc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter_custom.go @@ -34,9 +34,9 @@ type CustomDefaulter interface { } // WithCustomDefaulter creates a new Webhook for a CustomDefaulter interface. -func WithCustomDefaulter(obj runtime.Object, defaulter CustomDefaulter) *Webhook { +func WithCustomDefaulter(scheme *runtime.Scheme, obj runtime.Object, defaulter CustomDefaulter) *Webhook { return &Webhook{ - Handler: &defaulterForType{object: obj, defaulter: defaulter}, + Handler: &defaulterForType{object: obj, defaulter: defaulter, decoder: NewDecoder(scheme)}, } } @@ -46,15 +46,11 @@ type defaulterForType struct { decoder *Decoder } -var _ DecoderInjector = &defaulterForType{} - -func (h *defaulterForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *defaulterForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.defaulter == nil { panic("defaulter should never be nil") } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go index 0b274dd02..8dc0cbec6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go @@ -20,9 +20,3 @@ Package admission provides implementation for admission webhook and methods to i See examples/mutatingwebhook.go and examples/validatingwebhook.go for examples of admission webhooks. 
*/ package admission - -import ( - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" -) - -var log = logf.RuntimeLog.WithName("admission") diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go index 066cc4225..84ab5e75a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go @@ -52,7 +52,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { var reviewResponse Response if r.Body == nil { err = errors.New("request body is empty") - wh.log.Error(err, "bad request") + wh.getLogger(nil).Error(err, "bad request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -60,7 +60,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() if body, err = io.ReadAll(r.Body); err != nil { - wh.log.Error(err, "unable to read the body from the incoming request") + wh.getLogger(nil).Error(err, "unable to read the body from the incoming request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -69,7 +69,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { // verify the content type is accurate if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { err = fmt.Errorf("contentType=%s, expected application/json", contentType) - wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType) + wh.getLogger(nil).Error(err, "unable to process a request with unknown content type") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return @@ -88,12 +88,12 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { ar.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("AdmissionReview")) _, actualAdmRevGVK, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar) if err != nil { - wh.log.Error(err, "unable to decode the request") + wh.getLogger(nil).Error(err, "unable to decode the request") reviewResponse = Errored(http.StatusBadRequest, err) wh.writeResponse(w, reviewResponse) return } - wh.log.V(1).Info("received request", "UID", req.UID, "kind", req.Kind, "resource", req.Resource) + wh.getLogger(&req).V(4).Info("received request") reviewResponse = wh.Handle(ctx, req) wh.writeResponseTyped(w, reviewResponse, actualAdmRevGVK) @@ -124,7 +124,7 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK // writeAdmissionResponse writes ar to w. func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { if err := json.NewEncoder(w).Encode(ar); err != nil { - wh.log.Error(err, "unable to encode and write the response") + wh.getLogger(nil).Error(err, "unable to encode and write the response") // Since the `ar v1.AdmissionReview` is a clear and legal object, // it should not have problem to be marshalled into bytes. // The error here is probably caused by the abnormal HTTP connection, @@ -132,15 +132,15 @@ func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) { // to avoid endless circular calling. 
serverError := Errored(http.StatusInternalServerError, err) if err = json.NewEncoder(w).Encode(v1.AdmissionReview{Response: &serverError.AdmissionResponse}); err != nil { - wh.log.Error(err, "still unable to encode and write the InternalServerError response") + wh.getLogger(nil).Error(err, "still unable to encode and write the InternalServerError response") } } else { res := ar.Response - if log := wh.log; log.V(1).Enabled() { + if log := wh.getLogger(nil); log.V(4).Enabled() { if res.Result != nil { - log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason) + log = log.WithValues("code", res.Result.Code, "reason", res.Result.Reason, "message", res.Result.Message) } - log.V(1).Info("wrote response", "UID", res.UID, "allowed", res.Allowed) + log.V(4).Info("wrote response", "requestID", res.UID, "allowed", res.Allowed) } } } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go deleted file mode 100644 index d5af0d598..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/inject.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package admission - -// DecoderInjector is used by the ControllerManager to inject decoder into webhook handlers. -type DecoderInjector interface { - InjectDecoder(*Decoder) error -} - -// InjectDecoderInto will set decoder on i and return the result if it implements Decoder. Returns -// false if i does not implement Decoder. -func InjectDecoderInto(decoder *Decoder, i interface{}) (bool, error) { - if s, ok := i.(DecoderInjector); ok { - return true, s.InjectDecoder(decoder) - } - return false, nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go index 26900cf2e..2f7820d04 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go @@ -25,8 +25,6 @@ import ( jsonpatch "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) type multiMutating []Handler @@ -62,31 +60,6 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response { } } -// InjectFunc injects the field setter into the handlers. -func (hs multiMutating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. 
-func (hs multiMutating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} - // MultiMutatingHandler combines multiple mutating webhook handlers into a single // mutating webhook handler. Handlers are called in sequential order, and the first // `allowed: false` response may short-circuit the rest. Users must take care to @@ -120,28 +93,3 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response { func MultiValidatingHandler(handlers ...Handler) Handler { return multiValidating(handlers) } - -// InjectFunc injects the field setter into the handlers. -func (hs multiValidating) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - for _, handler := range hs { - if err := f(handler); err != nil { - return err - } - } - - return nil -} - -// InjectDecoder injects the decoder into the handlers. -func (hs multiValidating) InjectDecoder(d *Decoder) error { - for _, handler := range hs { - if _, err := InjectDecoderInto(d, handler); err != nil { - return err - } - } - return nil -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go index 24ff1dee3..ec1c88c98 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go @@ -26,21 +26,21 @@ import ( // Allowed constructs a response indicating that the given operation // is allowed (without any patches). -func Allowed(reason string) Response { - return ValidationResponse(true, reason) +func Allowed(message string) Response { + return ValidationResponse(true, message) } // Denied constructs a response indicating that the given operation // is not allowed. -func Denied(reason string) Response { - return ValidationResponse(false, reason) +func Denied(message string) Response { + return ValidationResponse(false, message) } // Patched constructs a response indicating that the given operation is // allowed, and that the target object should be modified by the given // JSONPatch operations. -func Patched(reason string, patches ...jsonpatch.JsonPatchOperation) Response { - resp := Allowed(reason) +func Patched(message string, patches ...jsonpatch.JsonPatchOperation) Response { + resp := Allowed(message) resp.Patches = patches return resp @@ -60,21 +60,24 @@ func Errored(code int32, err error) Response { } // ValidationResponse returns a response for admitting a request. 
-func ValidationResponse(allowed bool, reason string) Response { +func ValidationResponse(allowed bool, message string) Response { code := http.StatusForbidden + reason := metav1.StatusReasonForbidden if allowed { code = http.StatusOK + reason = "" } resp := Response{ AdmissionResponse: admissionv1.AdmissionResponse{ Allowed: allowed, Result: &metav1.Status{ - Code: int32(code), + Code: int32(code), + Reason: reason, }, }, } - if len(reason) > 0 { - resp.Result.Reason = metav1.StatusReason(reason) + if len(message) > 0 { + resp.Result.Message = message } return resp } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go index 4b27e75ed..00bda8a4c 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go @@ -18,7 +18,8 @@ package admission import ( "context" - goerrors "errors" + "errors" + "fmt" "net/http" v1 "k8s.io/api/admission/v1" @@ -26,18 +27,35 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// Warnings represents warning messages. +type Warnings []string + // Validator defines functions for validating an operation. +// The custom resource kind which implements this interface can validate itself. +// To validate the custom resource with another specific struct, use CustomValidator instead. type Validator interface { runtime.Object - ValidateCreate() error - ValidateUpdate(old runtime.Object) error - ValidateDelete() error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate() (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. The oldObj is the object before the update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(old runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete() (warnings Warnings, err error) } // ValidatingWebhookFor creates a new Webhook for validating the provided type. -func ValidatingWebhookFor(validator Validator) *Webhook { +func ValidatingWebhookFor(scheme *runtime.Scheme, validator Validator) *Webhook { return &Webhook{ - Handler: &validatingHandler{validator: validator}, + Handler: &validatingHandler{validator: validator, decoder: NewDecoder(scheme)}, } } @@ -46,42 +64,34 @@ type validatingHandler struct { decoder *Decoder } -var _ DecoderInjector = &validatingHandler{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatingHandler) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } - // Get the object in the request obj := h.validator.DeepCopyObject().(Validator) - if req.Operation == v1.Create { - err := h.decoder.Decode(req, obj) - if err != nil { + + var err error + var warnings []string + + switch req.Operation { + case v1.Connect: + // No validation for connect requests. 
+ // TODO(vincepri): Should we validate CONNECT requests? In what cases? + case v1.Create: + if err = h.decoder.Decode(req, obj); err != nil { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateCreate() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } - } - - if req.Operation == v1.Update { + warnings, err = obj.ValidateCreate() + case v1.Update: oldObj := obj.DeepCopyObject() - err := h.decoder.DecodeRaw(req.Object, obj) + err = h.decoder.DecodeRaw(req.Object, obj) if err != nil { return Errored(http.StatusBadRequest, err) } @@ -90,33 +100,26 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateUpdate(oldObj) - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } - } - - if req.Operation == v1.Delete { + warnings, err = obj.ValidateUpdate(oldObj) + case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted - err := h.decoder.DecodeRaw(req.OldObject, obj) + err = h.decoder.DecodeRaw(req.OldObject, obj) if err != nil { return Errored(http.StatusBadRequest, err) } - err = obj.ValidateDelete() - if err != nil { - var apiStatus apierrors.APIStatus - if goerrors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) - } - return Denied(err.Error()) - } + warnings, err = obj.ValidateDelete() + default: + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } - return Allowed("") + if err != nil { + var apiStatus apierrors.APIStatus + if errors.As(err, &apiStatus) { + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) + } + return Denied(err.Error()).WithWarnings(warnings...) + } + return Allowed("").WithWarnings(warnings...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go index 33252f113..e99fbd8a8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator_custom.go @@ -28,16 +28,29 @@ import ( ) // CustomValidator defines functions for validating an operation. +// The object to be validated is passed into methods as a parameter. type CustomValidator interface { - ValidateCreate(ctx context.Context, obj runtime.Object) error - ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error - ValidateDelete(ctx context.Context, obj runtime.Object) error + + // ValidateCreate validates the object on creation. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateCreate(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) + + // ValidateUpdate validates the object on update. + // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings Warnings, err error) + + // ValidateDelete validates the object on deletion. 
+ // The optional warnings will be added to the response as warning messages. + // Return an error if the object is invalid. + ValidateDelete(ctx context.Context, obj runtime.Object) (warnings Warnings, err error) } // WithCustomValidator creates a new Webhook for validating the provided type. -func WithCustomValidator(obj runtime.Object, validator CustomValidator) *Webhook { +func WithCustomValidator(scheme *runtime.Scheme, obj runtime.Object, validator CustomValidator) *Webhook { return &Webhook{ - Handler: &validatorForType{object: obj, validator: validator}, + Handler: &validatorForType{object: obj, validator: validator, decoder: NewDecoder(scheme)}, } } @@ -47,16 +60,11 @@ type validatorForType struct { decoder *Decoder } -var _ DecoderInjector = &validatorForType{} - -// InjectDecoder injects the decoder into a validatingHandler. -func (h *validatorForType) InjectDecoder(d *Decoder) error { - h.decoder = d - return nil -} - // Handle handles admission requests. func (h *validatorForType) Handle(ctx context.Context, req Request) Response { + if h.decoder == nil { + panic("decoder should never be nil") + } if h.validator == nil { panic("validator should never be nil") } @@ -70,13 +78,18 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { obj := h.object.DeepCopyObject() var err error + var warnings []string + switch req.Operation { + case v1.Connect: + // No validation for connect requests. + // TODO(vincepri): Should we validate CONNECT requests? In what cases? case v1.Create: if err := h.decoder.Decode(req, obj); err != nil { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateCreate(ctx, obj) + warnings, err = h.validator.ValidateCreate(ctx, obj) case v1.Update: oldObj := obj.DeepCopyObject() if err := h.decoder.DecodeRaw(req.Object, obj); err != nil { @@ -86,7 +99,7 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateUpdate(ctx, oldObj, obj) + warnings, err = h.validator.ValidateUpdate(ctx, oldObj, obj) case v1.Delete: // In reference to PR: https://github.com/kubernetes/kubernetes/pull/76346 // OldObject contains the object being deleted @@ -94,20 +107,20 @@ func (h *validatorForType) Handle(ctx context.Context, req Request) Response { return Errored(http.StatusBadRequest, err) } - err = h.validator.ValidateDelete(ctx, obj) + warnings, err = h.validator.ValidateDelete(ctx, obj) default: - return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation request %q", req.Operation)) + return Errored(http.StatusBadRequest, fmt.Errorf("unknown operation %q", req.Operation)) } // Check the error message first. if err != nil { var apiStatus apierrors.APIStatus if errors.As(err, &apiStatus) { - return validationResponseFromStatus(false, apiStatus.Status()) + return validationResponseFromStatus(false, apiStatus.Status()).WithWarnings(warnings...) } - return Denied(err.Error()) + return Denied(err.Error()).WithWarnings(warnings...) } // Return allowed if everything succeeded. - return Allowed("") + return Allowed("").WithWarnings(warnings...) 
} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go index d10b97ddd..f1767f31b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go @@ -21,18 +21,17 @@ import ( "errors" "fmt" "net/http" + "sync" "github.com/go-logr/logr" - jsonpatch "gomodules.xyz/jsonpatch/v2" + "gomodules.xyz/jsonpatch/v2" admissionv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" - logf "sigs.k8s.io/controller-runtime/pkg/internal/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -131,16 +130,14 @@ type Webhook struct { // headers thus allowing you to read them from within the handler WithContextFunc func(context.Context, *http.Request) context.Context - // decoder is constructed on receiving a scheme and passed down to then handler - decoder *Decoder + // LogConstructor is used to construct a logger for logging messages during webhook calls + // based on the given base logger (which might carry more values like the webhook's path). + // Note: LogConstructor has to be able to handle nil requests as we are also using it + // outside the context of requests. + LogConstructor func(base logr.Logger, req *Request) logr.Logger - log logr.Logger -} - -// InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook. -func (wh *Webhook) InjectLogger(l logr.Logger) error { - wh.log = l - return nil + setupLogOnce sync.Once + log logr.Logger } // WithRecoverPanic takes a bool flag which indicates whether the panic caused by webhook should be recovered. @@ -166,79 +163,47 @@ func (wh *Webhook) Handle(ctx context.Context, req Request) (response Response) }() } + reqLog := wh.getLogger(&req) + ctx = logf.IntoContext(ctx, reqLog) + resp := wh.Handler.Handle(ctx, req) if err := resp.Complete(req); err != nil { - wh.log.Error(err, "unable to encode response") + reqLog.Error(err, "unable to encode response") return Errored(http.StatusInternalServerError, errUnableToEncodeResponse) } return resp } -// InjectScheme injects a scheme into the webhook, in order to construct a Decoder. -func (wh *Webhook) InjectScheme(s *runtime.Scheme) error { - // TODO(directxman12): we should have a better way to pass this down - - var err error - wh.decoder, err = NewDecoder(s) - if err != nil { - return err - } - - // inject the decoder here too, just in case the order of calling this is not - // scheme first, then inject func - if wh.Handler != nil { - if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil { - return err +// getLogger constructs a logger from the injected log and LogConstructor. +func (wh *Webhook) getLogger(req *Request) logr.Logger { + wh.setupLogOnce.Do(func() { + if wh.log.GetSink() == nil { + wh.log = logf.Log.WithName("admission") } - } + }) - return nil + logConstructor := wh.LogConstructor + if logConstructor == nil { + logConstructor = DefaultLogConstructor + } + return logConstructor(wh.log, req) } -// GetDecoder returns a decoder to decode the objects embedded in admission requests. 
-// It may be nil if we haven't received a scheme to use to determine object types yet. -func (wh *Webhook) GetDecoder() *Decoder { - return wh.decoder -} - -// InjectFunc injects the field setter into the webhook. -func (wh *Webhook) InjectFunc(f inject.Func) error { - // inject directly into the handlers. It would be more correct - // to do this in a sync.Once in Handle (since we don't have some - // other start/finalize-type method), but it's more efficient to - // do it here, presumably. - - // also inject a decoder, and wrap this so that we get a setFields - // that injects a decoder (hopefully things don't ignore the duplicate - // InjectorInto call). - - var setFields inject.Func - setFields = func(target interface{}) error { - if err := f(target); err != nil { - return err - } - - if _, err := inject.InjectorInto(setFields, target); err != nil { - return err - } - - if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil { - return err - } - - return nil +// DefaultLogConstructor adds some commonly interesting fields to the given logger. +func DefaultLogConstructor(base logr.Logger, req *Request) logr.Logger { + if req != nil { + return base.WithValues("object", klog.KRef(req.Namespace, req.Name), + "namespace", req.Namespace, "name", req.Name, + "resource", req.Resource, "user", req.UserInfo.Username, + "requestID", req.UID, + ) } - - return setFields(wh.Handler) + return base } // StandaloneOptions let you configure a StandaloneWebhook. type StandaloneOptions struct { - // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources - // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better - // idea to pass your own scheme in. See the documentation in pkg/scheme for more information. - Scheme *runtime.Scheme // Logger to be used by the webhook. // If none is set, it defaults to log.Log global logger. Logger logr.Logger @@ -258,19 +223,9 @@ type StandaloneOptions struct { // in your own server/mux. In order to be accessed by a kubernetes cluster, // all webhook servers require TLS. func StandaloneWebhook(hook *Webhook, opts StandaloneOptions) (http.Handler, error) { - if opts.Scheme == nil { - opts.Scheme = scheme.Scheme + if opts.Logger.GetSink() != nil { + hook.log = opts.Logger } - - if err := hook.InjectScheme(opts.Scheme); err != nil { - return nil, err - } - - if opts.Logger.GetSink() == nil { - opts.Logger = logf.RuntimeLog.WithName("webhook") - } - hook.log = opts.Logger - if opts.MetricsPath == "" { return hook, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go index 99c863264..23d5bf435 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go @@ -29,12 +29,9 @@ import ( "sync" "time" - "k8s.io/apimachinery/pkg/runtime" - kscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/internal/httpserver" - "sigs.k8s.io/controller-runtime/pkg/runtime/inject" "sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics" ) @@ -49,7 +46,29 @@ var DefaultPort = 9443 // at the default locations (tls.crt and tls.key). If you do not // want to configure TLS (i.e for testing purposes) run an // admission.StandaloneWebhook in your own server. 
-type Server struct { +type Server interface { + // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates + // the webhook server doesn't need leader election. + NeedLeaderElection() bool + + // Register marks the given webhook as being served at the given path. + // It panics if two hooks are registered on the same path. + Register(path string, hook http.Handler) + + // Start runs the server. + // It will install the webhook related resources depend on the server configuration. + Start(ctx context.Context) error + + // StartedChecker returns an healthz.Checker which is healthy after the + // server has been started. + StartedChecker() healthz.Checker + + // WebhookMux returns the servers WebhookMux + WebhookMux() *http.ServeMux +} + +// Options are all the available options for a webhook.Server +type Options struct { // Host is the address that the server will listen on. // Defaults to "" - all addresses. Host string @@ -63,9 +82,13 @@ type Server struct { CertDir string // CertName is the server certificate name. Defaults to tls.crt. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. CertName string // KeyName is the server key name. Defaults to tls.key. + // + // Note: This option should only be set when TLSOpts does not override GetCertificate. KeyName string // ClientCAName is the CA certificate name which server used to verify remote(client)'s certificate. @@ -82,14 +105,22 @@ type Server struct { // WebhookMux is the multiplexer that handles different webhooks. WebhookMux *http.ServeMux +} - // webhooks keep track of all registered webhooks for dependency injection, - // and to provide better panic messages on duplicate webhook registration. +// NewServer constructs a new Server from the provided options. +func NewServer(o Options) Server { + return &DefaultServer{ + Options: o, + } +} + +// DefaultServer is the default implementation used for Server. +type DefaultServer struct { + Options Options + + // webhooks keep track of all registered webhooks webhooks map[string]http.Handler - // setFields allows injecting dependencies from an external source - setFields inject.Func - // defaultingOnce ensures that the default fields are only ever set once. defaultingOnce sync.Once @@ -99,41 +130,49 @@ type Server struct { // mu protects access to the webhook map & setFields for Start, Register, etc mu sync.Mutex + + webhookMux *http.ServeMux } // setDefaults does defaulting for the Server. -func (s *Server) setDefaults() { +func (o *Options) setDefaults() { + if o.WebhookMux == nil { + o.WebhookMux = http.NewServeMux() + } + + if o.Port <= 0 { + o.Port = DefaultPort + } + + if len(o.CertDir) == 0 { + o.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") + } + + if len(o.CertName) == 0 { + o.CertName = "tls.crt" + } + + if len(o.KeyName) == 0 { + o.KeyName = "tls.key" + } +} + +func (s *DefaultServer) setDefaults() { s.webhooks = map[string]http.Handler{} - if s.WebhookMux == nil { - s.WebhookMux = http.NewServeMux() - } + s.Options.setDefaults() - if s.Port <= 0 { - s.Port = DefaultPort - } - - if len(s.CertDir) == 0 { - s.CertDir = filepath.Join(os.TempDir(), "k8s-webhook-server", "serving-certs") - } - - if len(s.CertName) == 0 { - s.CertName = "tls.crt" - } - - if len(s.KeyName) == 0 { - s.KeyName = "tls.key" - } + s.webhookMux = s.Options.WebhookMux } // NeedLeaderElection implements the LeaderElectionRunnable interface, which indicates // the webhook server doesn't need leader election. 
-func (*Server) NeedLeaderElection() bool { +func (*DefaultServer) NeedLeaderElection() bool { return false } // Register marks the given webhook as being served at the given path. // It panics if two hooks are registered on the same path. -func (s *Server) Register(path string, hook http.Handler) { +func (s *DefaultServer) Register(path string, hook http.Handler) { s.mu.Lock() defer s.mu.Unlock() @@ -141,51 +180,11 @@ func (s *Server) Register(path string, hook http.Handler) { if _, found := s.webhooks[path]; found { panic(fmt.Errorf("can't register duplicate path: %v", path)) } - // TODO(directxman12): call setfields if we've already started the server s.webhooks[path] = hook - s.WebhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) + s.webhookMux.Handle(path, metrics.InstrumentedHook(path, hook)) regLog := log.WithValues("path", path) regLog.Info("Registering webhook") - - // we've already been "started", inject dependencies here. - // Otherwise, InjectFunc will do this for us later. - if s.setFields != nil { - if err := s.setFields(hook); err != nil { - // TODO(directxman12): swallowing this error isn't great, but we'd have to - // change the signature to fix that - regLog.Error(err, "unable to inject fields into webhook during registration") - } - - baseHookLog := log.WithName("webhooks") - - // NB(directxman12): we don't propagate this further by wrapping setFields because it's - // unclear if this is how we want to deal with log propagation. In this specific instance, - // we want to be able to pass a logger to webhooks because they don't know their own path. - if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", path), hook); err != nil { - regLog.Error(err, "unable to logger into webhook during registration") - } - } -} - -// StartStandalone runs a webhook server without -// a controller manager. -func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) error { - // Use the Kubernetes client-go scheme if none is specified - if scheme == nil { - scheme = kscheme.Scheme - } - - if err := s.InjectFunc(func(i interface{}) error { - if _, err := inject.SchemeInto(scheme, i); err != nil { - return err - } - return nil - }); err != nil { - return err - } - - return s.Start(ctx) } // tlsVersion converts from human-readable TLS version (for example "1.1") @@ -210,41 +209,49 @@ func tlsVersion(version string) (uint16, error) { // Start runs the server. // It will install the webhook related resources depend on the server configuration. 
-func (s *Server) Start(ctx context.Context) error {
+func (s *DefaultServer) Start(ctx context.Context) error {
 	s.defaultingOnce.Do(s.setDefaults)
 
 	baseHookLog := log.WithName("webhooks")
 	baseHookLog.Info("Starting webhook server")
 
-	certPath := filepath.Join(s.CertDir, s.CertName)
-	keyPath := filepath.Join(s.CertDir, s.KeyName)
-
-	certWatcher, err := certwatcher.New(certPath, keyPath)
-	if err != nil {
-		return err
-	}
-
-	go func() {
-		if err := certWatcher.Start(ctx); err != nil {
-			log.Error(err, "certificate watcher error")
-		}
-	}()
-
-	tlsMinVersion, err := tlsVersion(s.TLSMinVersion)
+	tlsMinVersion, err := tlsVersion(s.Options.TLSMinVersion)
 	if err != nil {
 		return err
 	}
 
 	cfg := &tls.Config{ //nolint:gosec
-		NextProtos:     []string{"h2"},
-		GetCertificate: certWatcher.GetCertificate,
-		MinVersion:     tlsMinVersion,
+		NextProtos: []string{"h2"},
+		MinVersion: tlsMinVersion,
+	}
+	// fallback TLS config ready, will now mutate if passer wants full control over it
+	for _, op := range s.Options.TLSOpts {
+		op(cfg)
 	}
 
-	// load CA to verify client certificate
-	if s.ClientCAName != "" {
+	if cfg.GetCertificate == nil {
+		certPath := filepath.Join(s.Options.CertDir, s.Options.CertName)
+		keyPath := filepath.Join(s.Options.CertDir, s.Options.KeyName)
+
+		// Create the certificate watcher and
+		// set the config's GetCertificate on the TLSConfig
+		certWatcher, err := certwatcher.New(certPath, keyPath)
+		if err != nil {
+			return err
+		}
+		cfg.GetCertificate = certWatcher.GetCertificate
+
+		go func() {
+			if err := certWatcher.Start(ctx); err != nil {
+				log.Error(err, "certificate watcher error")
+			}
+		}()
+	}
+
+	// Load CA to verify client certificate, if configured.
+	if s.Options.ClientCAName != "" {
 		certPool := x509.NewCertPool()
-		clientCABytes, err := os.ReadFile(filepath.Join(s.CertDir, s.ClientCAName))
+		clientCABytes, err := os.ReadFile(filepath.Join(s.Options.CertDir, s.Options.ClientCAName))
 		if err != nil {
 			return fmt.Errorf("failed to read client CA cert: %w", err)
 		}
@@ -258,27 +265,23 @@ func (s *Server) Start(ctx context.Context) error {
 		cfg.ClientAuth = tls.RequireAndVerifyClientCert
 	}
 
-	// fallback TLS config ready, will now mutate if passer wants full control over it
-	for _, op := range s.TLSOpts {
-		op(cfg)
-	}
-
-	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg)
+	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), cfg)
 	if err != nil {
 		return err
 	}
 
-	log.Info("Serving webhook server", "host", s.Host, "port", s.Port)
+	log.Info("Serving webhook server", "host", s.Options.Host, "port", s.Options.Port)
 
-	srv := httpserver.New(s.WebhookMux)
+	srv := httpserver.New(s.webhookMux)
 
 	idleConnsClosed := make(chan struct{})
 	go func() {
 		<-ctx.Done()
-		log.Info("shutting down webhook server")
+		log.Info("Shutting down webhook server with timeout of 1 minute")
 
-		// TODO: use a context with reasonable timeout
-		if err := srv.Shutdown(context.Background()); err != nil {
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+		defer cancel()
+		if err := srv.Shutdown(ctx); err != nil {
 			// Error from closing listeners, or context timeout
 			log.Error(err, "error shutting down the HTTP server")
 		}
@@ -298,7 +301,7 @@ func (s *Server) Start(ctx context.Context) error {
 
 // StartedChecker returns an healthz.Checker which is healthy after the
 // server has been started.
-func (s *Server) StartedChecker() healthz.Checker {
+func (s *DefaultServer) StartedChecker() healthz.Checker {
 	config := &tls.Config{
 		InsecureSkipVerify: true, //nolint:gosec // config is used to connect to our own webhook port.
 	}
@@ -311,7 +314,7 @@ func (s *Server) StartedChecker() healthz.Checker {
 		}
 
 		d := &net.Dialer{Timeout: 10 * time.Second}
-		conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config)
+		conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Options.Host, strconv.Itoa(s.Options.Port)), config)
 		if err != nil {
 			return fmt.Errorf("webhook server is not reachable: %w", err)
 		}
@@ -324,23 +327,7 @@ func (s *Server) StartedChecker() healthz.Checker {
 	}
 }
 
-// InjectFunc injects the field setter into the server.
-func (s *Server) InjectFunc(f inject.Func) error {
-	s.setFields = f
-
-	// inject fields here that weren't injected in Register because we didn't have setFields yet.
-	baseHookLog := log.WithName("webhooks")
-	for hookPath, webhook := range s.webhooks {
-		if err := s.setFields(webhook); err != nil {
-			return err
-		}
-
-		// NB(directxman12): we don't propagate this further by wrapping setFields because it's
-		// unclear if this is how we want to deal with log propagation. In this specific instance,
-		// we want to be able to pass a logger to webhooks because they don't know their own path.
-		if _, err := inject.LoggerInto(baseHookLog.WithValues("webhook", hookPath), webhook); err != nil {
-			return err
-		}
-	}
-	return nil
+// WebhookMux returns the servers WebhookMux
+func (s *DefaultServer) WebhookMux() *http.ServeMux {
+	return s.webhookMux
 }
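Reviewer note: the hunks above replace the old exported webhook.Server struct (and its inject-based InjectFunc/StartStandalone helpers) with a Server interface that is constructed from Options via NewServer. The sketch below shows how a caller would use the reworked API after this rebase; the path, handler, and certificate directory are hypothetical examples, and only NewServer, Options, Register, Start, and StartedChecker are taken from the vendored code above.

// Sketch only: illustrates the reworked controller-runtime webhook server API.
package main

import (
	"context"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

func main() {
	// The server is now built from Options instead of filling the old Server
	// struct; unset fields are defaulted by Options.setDefaults() (DefaultPort,
	// certificates under os.TempDir()).
	srv := webhook.NewServer(webhook.Options{
		Port:    9443,
		CertDir: "/etc/webhook/certs", // hypothetical location
	})

	// Register panics if the same path is registered twice, as documented in
	// the Server interface above.
	srv.Register("/validate-example", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	// StartedChecker can be wired into a manager's readiness endpoint; Start
	// blocks until the context is cancelled and watches the serving
	// certificate unless a TLSOpts entry sets GetCertificate itself.
	_ = srv.StartedChecker()
	if err := srv.Start(context.Background()); err != nil {
		panic(err)
	}
}

When full control over the TLS configuration is needed, a caller can append a function to Options.TLSOpts; if that function sets GetCertificate, the cfg.GetCertificate == nil branch in Start skips the certwatcher entirely, matching the hunk above.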