Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)

commit 2551a0b05f (parent 07b05616a0), committed by mergify[bot]

    rebase: update all k8s packages to 0.27.2

    Signed-off-by: Niels de Vos <ndevos@ibm.com>
2 vendor/k8s.io/apimachinery/pkg/util/errors/errors.go (generated, vendored)

@@ -214,7 +214,7 @@ func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
 	return NewAggregate(result)
 }
 
-// Reduce will return err or, if err is an Aggregate and only has one item,
+// Reduce will return err or nil, if err is an Aggregate and only has one item,
 // the first item in the aggregate.
 func Reduce(err error) error {
 	if agg, ok := err.(Aggregate); ok && err != nil {
2 vendor/k8s.io/apimachinery/pkg/util/framer/framer.go (generated, vendored)

@@ -32,7 +32,7 @@ func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
 	return &lengthDelimitedFrameWriter{w: w}
 }
 
-// Write writes a single frame to the nested writer, prepending it with the length in
+// Write writes a single frame to the nested writer, prepending it with the length
 // in bytes of data (as a 4 byte, bigendian uint32).
 func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
 	binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
7018 vendor/k8s.io/apimachinery/pkg/util/managedfields/endpoints.yaml (generated, vendored, new file)

File diff suppressed because it is too large.
57 vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go (generated, vendored, new file)

@@ -0,0 +1,57 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package managedfields

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields/internal"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// FieldManager updates the managed fields and merges applied
// configurations.
type FieldManager = internal.FieldManager

// NewDefaultFieldManager creates a new FieldManager that merges apply requests
// and update managed fields for other types of requests.
func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
	f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
	if err != nil {
		return nil, fmt.Errorf("failed to create field manager: %v", err)
	}
	return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
}

// NewDefaultCRDFieldManager creates a new FieldManager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
	f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
	if err != nil {
		return nil, fmt.Errorf("failed to create field manager: %v", err)
	}
	return internal.NewDefaultFieldManager(f, typeConverter, objectConverter, objectCreater, kind, subresource), nil
}

func ValidateManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) error {
	_, err := internal.DecodeManagedFields(encodedManagedFields)
	return err
}
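Aside: a minimal sketch (not part of the commit) of how a caller can use the exported ValidateManagedFields helper above to reject malformed managedFields before persisting them; the entry values are illustrative.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/managedfields"
)

func main() {
	entries := []metav1.ManagedFieldsEntry{{
		Manager:    "kubectl",
		Operation:  metav1.ManagedFieldsOperationApply,
		APIVersion: "v1",
		FieldsType: "FieldsV1",
		FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)},
	}}
	// ValidateManagedFields simply attempts a full decode and reports the error.
	if err := managedfields.ValidateManagedFields(entries); err != nil {
		fmt.Println("invalid managedFields:", err)
		return
	}
	fmt.Println("managedFields entries decode cleanly")
}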
60 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/atmostevery.go (generated, vendored, new file)

@@ -0,0 +1,60 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"sync"
	"time"
)

// AtMostEvery will never run the method more than once every specified
// duration.
type AtMostEvery struct {
	delay    time.Duration
	lastCall time.Time
	mutex    sync.Mutex
}

// NewAtMostEvery creates a new AtMostEvery, that will run the method at
// most every given duration.
func NewAtMostEvery(delay time.Duration) *AtMostEvery {
	return &AtMostEvery{
		delay: delay,
	}
}

// updateLastCall returns true if the lastCall time has been updated,
// false if it was too early.
func (s *AtMostEvery) updateLastCall() bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	if time.Since(s.lastCall) < s.delay {
		return false
	}
	s.lastCall = time.Now()
	return true
}

// Do will run the method if enough time has passed, and return true.
// Otherwise, it does nothing and returns false.
func (s *AtMostEvery) Do(fn func()) bool {
	if !s.updateLastCall() {
		return false
	}
	fn()
	return true
}
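Aside: a minimal sketch (not part of the commit) of AtMostEvery as a rate limiter, written as if it lived inside this internal package (the type is not importable from outside) with "fmt" and "time" imported; fieldmanager.go below uses exactly this pattern (atMostEverySecond) to throttle error logging.

func ExampleAtMostEvery() {
	limiter := NewAtMostEvery(50 * time.Millisecond)

	// The first call runs and updates lastCall.
	fmt.Println(limiter.Do(func() { fmt.Println("expensive work") })) // true

	// A second call inside the delay window is skipped.
	fmt.Println(limiter.Do(func() { fmt.Println("never printed") })) // false

	// After the delay has elapsed, calls run again.
	time.Sleep(60 * time.Millisecond)
	fmt.Println(limiter.Do(func() { fmt.Println("expensive work") })) // true
}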
74 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/buildmanagerinfo.go (generated, vendored, new file)

@@ -0,0 +1,74 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type buildManagerInfoManager struct {
	fieldManager Manager
	groupVersion schema.GroupVersion
	subresource  string
}

var _ Manager = &buildManagerInfoManager{}

// NewBuildManagerInfoManager creates a new Manager that converts the manager name into a unique identifier
// combining operation and version for update requests, and just operation for apply requests.
func NewBuildManagerInfoManager(f Manager, gv schema.GroupVersion, subresource string) Manager {
	return &buildManagerInfoManager{
		fieldManager: f,
		groupVersion: gv,
		subresource:  subresource,
	}
}

// Update implements Manager.
func (f *buildManagerInfoManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationUpdate)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
	}
	return f.fieldManager.Update(liveObj, newObj, managed, manager)
}

// Apply implements Manager.
func (f *buildManagerInfoManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
	manager, err := f.buildManagerInfo(manager, metav1.ManagedFieldsOperationApply)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to build manager identifier: %v", err)
	}
	return f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
}

func (f *buildManagerInfoManager) buildManagerInfo(prefix string, operation metav1.ManagedFieldsOperationType) (string, error) {
	managerInfo := metav1.ManagedFieldsEntry{
		Manager:     prefix,
		Operation:   operation,
		APIVersion:  f.groupVersion.String(),
		Subresource: f.subresource,
	}
	if managerInfo.Manager == "" {
		managerInfo.Manager = "unknown"
	}
	return BuildManagerIdentifier(&managerInfo)
}
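Aside: buildManagerInfo above delegates to BuildManagerIdentifier (defined later in managedfields.go), which JSON-encodes the entry. A sketch (not part of the commit; the manager, version, and subresource values are illustrative) of the identifier an update request ends up keyed under:

id, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{
	Manager:     "kubelet",
	Operation:   metav1.ManagedFieldsOperationUpdate,
	APIVersion:  "v1",
	Subresource: "status",
})
if err == nil {
	// Prints something like:
	//   {"manager":"kubelet","operation":"Update","apiVersion":"v1","subresource":"status"}
	fmt.Println(id)
}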
133 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/capmanagers.go (generated, vendored, new file)

@@ -0,0 +1,133 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"
	"sort"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

type capManagersManager struct {
	fieldManager          Manager
	maxUpdateManagers     int
	oldUpdatesManagerName string
}

var _ Manager = &capManagersManager{}

// NewCapManagersManager creates a new wrapped FieldManager which ensures that the number of managers from updates
// does not exceed maxUpdateManagers, by merging some of the oldest entries on each update.
func NewCapManagersManager(fieldManager Manager, maxUpdateManagers int) Manager {
	return &capManagersManager{
		fieldManager:          fieldManager,
		maxUpdateManagers:     maxUpdateManagers,
		oldUpdatesManagerName: "ancient-changes",
	}
}

// Update implements Manager.
func (f *capManagersManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
	if err != nil {
		return object, managed, err
	}
	if managed, err = f.capUpdateManagers(managed); err != nil {
		return nil, nil, fmt.Errorf("failed to cap update managers: %v", err)
	}
	return object, managed, nil
}

// Apply implements Manager.
func (f *capManagersManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
	return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
}

// capUpdateManagers merges a number of the oldest update entries into versioned buckets,
// such that the number of entries from updates does not exceed f.maxUpdateManagers.
func (f *capManagersManager) capUpdateManagers(managed Managed) (newManaged Managed, err error) {
	// Gather all entries from updates
	updaters := []string{}
	for manager, fields := range managed.Fields() {
		if !fields.Applied() {
			updaters = append(updaters, manager)
		}
	}
	if len(updaters) <= f.maxUpdateManagers {
		return managed, nil
	}

	// If we have more than the maximum, sort the update entries by time, oldest first.
	sort.Slice(updaters, func(i, j int) bool {
		iTime, jTime, iSeconds, jSeconds := managed.Times()[updaters[i]], managed.Times()[updaters[j]], int64(0), int64(0)
		if iTime != nil {
			iSeconds = iTime.Unix()
		}
		if jTime != nil {
			jSeconds = jTime.Unix()
		}
		if iSeconds != jSeconds {
			return iSeconds < jSeconds
		}
		return updaters[i] < updaters[j]
	})

	// Merge the oldest updaters with versioned bucket managers until the number of updaters is under the cap
	versionToFirstManager := map[string]string{}
	for i, length := 0, len(updaters); i < len(updaters) && length > f.maxUpdateManagers; i++ {
		manager := updaters[i]
		vs := managed.Fields()[manager]
		time := managed.Times()[manager]
		version := string(vs.APIVersion())

		// Create a new manager identifier for the versioned bucket entry.
		// The version for this manager comes from the version of the update being merged into the bucket.
		bucket, err := BuildManagerIdentifier(&metav1.ManagedFieldsEntry{
			Manager:    f.oldUpdatesManagerName,
			Operation:  metav1.ManagedFieldsOperationUpdate,
			APIVersion: version,
		})
		if err != nil {
			return managed, fmt.Errorf("failed to create bucket manager for version %v: %v", version, err)
		}

		// Merge the fieldsets if this is not the first time the version was seen.
		// Otherwise just record the manager name in versionToFirstManager
		if first, ok := versionToFirstManager[version]; ok {
			// If the bucket doesn't exist yet, create one.
			if _, ok := managed.Fields()[bucket]; !ok {
				s := managed.Fields()[first]
				delete(managed.Fields(), first)
				managed.Fields()[bucket] = s
			}

			managed.Fields()[bucket] = fieldpath.NewVersionedSet(vs.Set().Union(managed.Fields()[bucket].Set()), vs.APIVersion(), vs.Applied())
			delete(managed.Fields(), manager)
			length--

			// Use the time from the update being merged into the bucket, since it is more recent.
			managed.Times()[bucket] = time
		} else {
			versionToFirstManager[version] = manager
		}
	}

	return managed, nil
}
89 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/conflict.go (generated, vendored, new file)

@@ -0,0 +1,89 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"encoding/json"
	"fmt"
	"sort"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

// NewConflictError returns an error including details on the request's apply conflicts
func NewConflictError(conflicts merge.Conflicts) *errors.StatusError {
	causes := []metav1.StatusCause{}
	for _, conflict := range conflicts {
		causes = append(causes, metav1.StatusCause{
			Type:    metav1.CauseTypeFieldManagerConflict,
			Message: fmt.Sprintf("conflict with %v", printManager(conflict.Manager)),
			Field:   conflict.Path.String(),
		})
	}
	return errors.NewApplyConflict(causes, getConflictMessage(conflicts))
}

func getConflictMessage(conflicts merge.Conflicts) string {
	if len(conflicts) == 1 {
		return fmt.Sprintf("Apply failed with 1 conflict: conflict with %v: %v", printManager(conflicts[0].Manager), conflicts[0].Path)
	}

	m := map[string][]fieldpath.Path{}
	for _, conflict := range conflicts {
		m[conflict.Manager] = append(m[conflict.Manager], conflict.Path)
	}

	uniqueManagers := []string{}
	for manager := range m {
		uniqueManagers = append(uniqueManagers, manager)
	}

	// Print conflicts by sorted managers.
	sort.Strings(uniqueManagers)

	messages := []string{}
	for _, manager := range uniqueManagers {
		messages = append(messages, fmt.Sprintf("conflicts with %v:", printManager(manager)))
		for _, path := range m[manager] {
			messages = append(messages, fmt.Sprintf("- %v", path))
		}
	}
	return fmt.Sprintf("Apply failed with %d conflicts: %s", len(conflicts), strings.Join(messages, "\n"))
}

func printManager(manager string) string {
	encodedManager := &metav1.ManagedFieldsEntry{}
	if err := json.Unmarshal([]byte(manager), encodedManager); err != nil {
		return fmt.Sprintf("%q", manager)
	}
	managerStr := fmt.Sprintf("%q", encodedManager.Manager)
	if encodedManager.Subresource != "" {
		managerStr = fmt.Sprintf("%s with subresource %q", managerStr, encodedManager.Subresource)
	}
	if encodedManager.Operation == metav1.ManagedFieldsOperationUpdate {
		if encodedManager.Time == nil {
			return fmt.Sprintf("%s using %v", managerStr, encodedManager.APIVersion)
		}
		return fmt.Sprintf("%s using %v at %v", managerStr, encodedManager.APIVersion, encodedManager.Time.UTC().Format(time.RFC3339))
	}
	return managerStr
}
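Aside: a sketch (not part of the commit) of what these helpers produce for a single conflict. It assumes fieldpath.MakePathOrDie from structured-merge-diff to build an illustrative path, and a manager string in the JSON identifier format described above.

conflicts := merge.Conflicts{{
	Manager: `{"manager":"kubectl","operation":"Apply"}`,
	Path:    fieldpath.MakePathOrDie("spec", "replicas"),
}}

statusErr := NewConflictError(conflicts)
// The status message reads roughly:
//   Apply failed with 1 conflict: conflict with "kubectl": .spec.replicas
fmt.Println(statusErr.ErrStatus.Message)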
206 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go (generated, vendored, new file)

@@ -0,0 +1,206 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"
	"reflect"
	"time"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog/v2"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates;
// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum.
// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries.
const DefaultMaxUpdateManagers int = 10

// DefaultTrackOnCreateProbability defines the default probability that the field management of an object
// starts being tracked from the object's creation, instead of from the first time the object is applied to.
const DefaultTrackOnCreateProbability float32 = 1

var atMostEverySecond = NewAtMostEvery(time.Second)

// FieldManager updates the managed fields and merges applied
// configurations.
type FieldManager struct {
	fieldManager Manager
	subresource  string
}

// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields
// on update and apply requests.
func NewFieldManager(f Manager, subresource string) *FieldManager {
	return &FieldManager{fieldManager: f, subresource: subresource}
}

// NewDefaultFieldManager is a helper function which wraps a Manager with certain default logic.
func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager {
	return NewFieldManager(
		NewLastAppliedUpdater(
			NewLastAppliedManager(
				NewProbabilisticSkipNonAppliedManager(
					NewCapManagersManager(
						NewBuildManagerInfoManager(
							NewManagedFieldsUpdater(
								NewStripMetaManager(f),
							), kind.GroupVersion(), subresource,
						), DefaultMaxUpdateManagers,
					), objectCreater, kind, DefaultTrackOnCreateProbability,
				), typeConverter, objectConverter, kind.GroupVersion()),
		), subresource,
	)
}

func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) {
	liveAccessor, err := meta.Accessor(liveObj)
	if err != nil {
		return nil, err
	}

	// We take the managedFields of the live object in case the request tries to
	// manually set managedFields via a subresource.
	if ignoreManagedFieldsFromRequestObject {
		return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields()))
	}

	// If the object doesn't have metadata, we should just return without trying to
	// set the managedFields at all, so creates/updates/patches will work normally.
	newAccessor, err := meta.Accessor(newObj)
	if err != nil {
		return nil, err
	}

	if isResetManagedFields(newAccessor.GetManagedFields()) {
		return NewEmptyManaged(), nil
	}

	// If the managed field is empty or we failed to decode it,
	// let's try the live object. This is to prevent clients who
	// don't understand managedFields from deleting it accidentally.
	managed, err := DecodeManagedFields(newAccessor.GetManagedFields())
	if err != nil || len(managed.Fields()) == 0 {
		return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields()))
	}
	return managed, nil
}

func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) {
	if err != nil {
		return NewEmptyManaged(), nil
	}
	return managed, nil
}

// Update is used when the object has already been merged (non-apply
// use-case), and simply updates the managed fields in the output
// object.
func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) {
	// First try to decode the managed fields provided in the update;
	// this is necessary to allow directly updating managed fields.
	isSubresource := f.subresource != ""
	managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource)
	if err != nil {
		return newObj, nil
	}

	RemoveObjectManagedFields(newObj)

	if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil {
		return nil, err
	}

	if err = EncodeObjectManagedFields(object, managed); err != nil {
		return nil, fmt.Errorf("failed to encode managed fields: %v", err)
	}

	return object, nil
}

// UpdateNoErrors is the same as Update, but it will not return
// errors. If an error happens, the object is returned with
// managedFields cleared.
func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object {
	obj, err := f.Update(liveObj, newObj, manager)
	if err != nil {
		atMostEverySecond.Do(func() {
			ns, name := "unknown", "unknown"
			if accessor, err := meta.Accessor(newObj); err == nil {
				ns = accessor.GetNamespace()
				name = accessor.GetName()
			}

			klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind",
				newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name)
		})
		// Explicitly remove managedFields on failure, so that
		// we can't have garbage in it.
		RemoveObjectManagedFields(newObj)
		return newObj
	}
	return obj
}

// Returns true if the managedFields indicate that the user is trying to
// reset the managedFields, i.e. if the list is non-nil but empty, or if
// the list has one empty item.
func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool {
	if len(managedFields) == 0 {
		return managedFields != nil
	}

	if len(managedFields) == 1 {
		return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{})
	}

	return false
}

// Apply is used when server-side apply is called, as it merges the
// object and updates the managed fields.
func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) {
	// If the object doesn't have metadata, apply isn't allowed.
	accessor, err := meta.Accessor(liveObj)
	if err != nil {
		return nil, fmt.Errorf("couldn't get accessor: %v", err)
	}

	// Decode the managed fields in the live object, since it isn't allowed in the patch.
	managed, err := DecodeManagedFields(accessor.GetManagedFields())
	if err != nil {
		return nil, fmt.Errorf("failed to decode managed fields: %v", err)
	}

	object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
	if err != nil {
		if conflicts, ok := err.(merge.Conflicts); ok {
			return nil, NewConflictError(conflicts)
		}
		return nil, err
	}

	if err = EncodeObjectManagedFields(object, managed); err != nil {
		return nil, fmt.Errorf("failed to encode managed fields: %v", err)
	}

	return object, nil
}
47 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fields.go (generated, vendored, new file)

@@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"bytes"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// EmptyFields represents a set with no paths
// It looks like metav1.Fields{Raw: []byte("{}")}
var EmptyFields = func() metav1.FieldsV1 {
	f, err := SetToFields(*fieldpath.NewSet())
	if err != nil {
		panic("should never happen")
	}
	return f
}()

// FieldsToSet creates a set of paths from an input trie of fields
func FieldsToSet(f metav1.FieldsV1) (s fieldpath.Set, err error) {
	err = s.FromJSON(bytes.NewReader(f.Raw))
	return s, err
}

// SetToFields creates a trie of fields from an input set of paths
func SetToFields(s fieldpath.Set) (f metav1.FieldsV1, err error) {
	f.Raw, err = s.ToJSON()
	return f, err
}
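Aside: a sketch (not part of the commit) of round-tripping between the wire FieldsV1 trie and the fieldpath.Set representation using the two helpers above; the JSON literal is illustrative, and fieldpath.MakePathOrDie comes from structured-merge-diff.

raw := metav1.FieldsV1{Raw: []byte(`{"f:metadata":{"f:labels":{"f:app":{}}}}`)}

set, err := FieldsToSet(raw)
if err != nil {
	panic(err)
}
// The leaf path is now queryable as a fieldpath.Path.
fmt.Println(set.Has(fieldpath.MakePathOrDie("metadata", "labels", "app"))) // true

// SetToFields re-serializes the same trie.
back, err := SetToFields(set)
if err != nil {
	panic(err)
}
fmt.Println(string(back.Raw))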
50 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastapplied.go (generated, vendored, new file)

@@ -0,0 +1,50 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	"k8s.io/apimachinery/pkg/runtime"
)

// LastAppliedConfigAnnotation is the annotation used to store the previous
// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
//
// This is a copy of the corev1 annotation since we don't want to depend on the whole package.
const LastAppliedConfigAnnotation = "kubectl.kubernetes.io/last-applied-configuration"

// SetLastApplied sets the last-applied annotation to the given value on
// the object.
func SetLastApplied(obj runtime.Object, value string) error {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}
	var annotations = accessor.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[LastAppliedConfigAnnotation] = value
	if err := apimachineryvalidation.ValidateAnnotationsSize(annotations); err != nil {
		delete(annotations, LastAppliedConfigAnnotation)
	}
	accessor.SetAnnotations(annotations)
	return nil
}
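Aside: a sketch (not part of the commit) of SetLastApplied storing a configuration on an object, the way kubectl's client-side apply does; the ConfigMap and its JSON are illustrative, and unstructured comes from k8s.io/apimachinery/pkg/apis/meta/v1/unstructured.

obj := &unstructured.Unstructured{Object: map[string]interface{}{
	"apiVersion": "v1",
	"kind":       "ConfigMap",
	"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
}}

cfg := `{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo","namespace":"default"}}`
if err := SetLastApplied(obj, cfg); err != nil {
	panic(err)
}
// The value now lives under kubectl.kubernetes.io/last-applied-configuration.
fmt.Println(obj.GetAnnotations()[LastAppliedConfigAnnotation])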
171 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go (generated, vendored, new file)

@@ -0,0 +1,171 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

type lastAppliedManager struct {
	fieldManager    Manager
	typeConverter   TypeConverter
	objectConverter runtime.ObjectConvertor
	groupVersion    schema.GroupVersion
}

var _ Manager = &lastAppliedManager{}

// NewLastAppliedManager converts the client-side apply annotation to
// server-side apply managed fields
func NewLastAppliedManager(fieldManager Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, groupVersion schema.GroupVersion) Manager {
	return &lastAppliedManager{
		fieldManager:    fieldManager,
		typeConverter:   typeConverter,
		objectConverter: objectConverter,
		groupVersion:    groupVersion,
	}
}

// Update implements Manager.
func (f *lastAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	return f.fieldManager.Update(liveObj, newObj, managed, manager)
}

// Apply will consider the last-applied annotation
// for upgrading an object managed by client-side apply to server-side apply
// without conflicts.
func (f *lastAppliedManager) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
	newLiveObj, newManaged, newErr := f.fieldManager.Apply(liveObj, newObj, managed, manager, force)
	// Upgrade the client-side apply annotation only from kubectl server-side-apply.
	// To opt-out of this behavior, users may specify a different field manager.
	if manager != "kubectl" {
		return newLiveObj, newManaged, newErr
	}

	// Check if we have conflicts
	if newErr == nil {
		return newLiveObj, newManaged, newErr
	}
	conflicts, ok := newErr.(merge.Conflicts)
	if !ok {
		return newLiveObj, newManaged, newErr
	}
	conflictSet := conflictsToSet(conflicts)

	// Check if conflicts are allowed due to client-side apply,
	// and if so, then force apply
	allowedConflictSet, err := f.allowedConflictsFromLastApplied(liveObj)
	if err != nil {
		return newLiveObj, newManaged, newErr
	}
	if !conflictSet.Difference(allowedConflictSet).Empty() {
		newConflicts := conflictsDifference(conflicts, allowedConflictSet)
		return newLiveObj, newManaged, newConflicts
	}

	return f.fieldManager.Apply(liveObj, newObj, managed, manager, true)
}

func (f *lastAppliedManager) allowedConflictsFromLastApplied(liveObj runtime.Object) (*fieldpath.Set, error) {
	var accessor, err = meta.Accessor(liveObj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}

	// If there is no client-side apply annotation, then there is nothing to do
	var annotations = accessor.GetAnnotations()
	if annotations == nil {
		return nil, fmt.Errorf("no last applied annotation")
	}
	var lastApplied, ok = annotations[LastAppliedConfigAnnotation]
	if !ok || lastApplied == "" {
		return nil, fmt.Errorf("no last applied annotation")
	}

	liveObjVersioned, err := f.objectConverter.ConvertToVersion(liveObj, f.groupVersion)
	if err != nil {
		return nil, fmt.Errorf("failed to convert live obj to versioned: %v", err)
	}

	liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned)
	if err != nil {
		return nil, fmt.Errorf("failed to convert live obj to typed: %v", err)
	}

	var lastAppliedObj = &unstructured.Unstructured{Object: map[string]interface{}{}}
	err = json.Unmarshal([]byte(lastApplied), lastAppliedObj)
	if err != nil {
		return nil, fmt.Errorf("failed to decode last applied obj: %v in '%s'", err, lastApplied)
	}

	if lastAppliedObj.GetAPIVersion() != f.groupVersion.String() {
		return nil, fmt.Errorf("expected version of last applied to match live object '%s', but got '%s': %v", f.groupVersion.String(), lastAppliedObj.GetAPIVersion(), err)
	}

	lastAppliedObjTyped, err := f.typeConverter.ObjectToTyped(lastAppliedObj)
	if err != nil {
		return nil, fmt.Errorf("failed to convert last applied to typed: %v", err)
	}

	lastAppliedObjFieldSet, err := lastAppliedObjTyped.ToFieldSet()
	if err != nil {
		return nil, fmt.Errorf("failed to create fieldset for last applied object: %v", err)
	}

	comparison, err := lastAppliedObjTyped.Compare(liveObjTyped)
	if err != nil {
		return nil, fmt.Errorf("failed to compare last applied object and live object: %v", err)
	}

	// Remove fields in last applied that are different, added, or missing in
	// the live object.
	// Because last-applied fields don't match the live object fields,
	// then we don't own these fields.
	lastAppliedObjFieldSet = lastAppliedObjFieldSet.
		Difference(comparison.Modified).
		Difference(comparison.Added).
		Difference(comparison.Removed)

	return lastAppliedObjFieldSet, nil
}

// TODO: replace with merge.Conflicts.ToSet()
func conflictsToSet(conflicts merge.Conflicts) *fieldpath.Set {
	conflictSet := fieldpath.NewSet()
	for _, conflict := range []merge.Conflict(conflicts) {
		conflictSet.Insert(conflict.Path)
	}
	return conflictSet
}

func conflictsDifference(conflicts merge.Conflicts, s *fieldpath.Set) merge.Conflicts {
	newConflicts := []merge.Conflict{}
	for _, conflict := range []merge.Conflict(conflicts) {
		if !s.Has(conflict.Path) {
			newConflicts = append(newConflicts, conflict)
		}
	}
	return newConflicts
}
102 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedupdater.go (generated, vendored, new file)

@@ -0,0 +1,102 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

type lastAppliedUpdater struct {
	fieldManager Manager
}

var _ Manager = &lastAppliedUpdater{}

// NewLastAppliedUpdater sets the client-side apply annotation up to date with
// server-side apply managed fields
func NewLastAppliedUpdater(fieldManager Manager) Manager {
	return &lastAppliedUpdater{
		fieldManager: fieldManager,
	}
}

// Update implements Manager.
func (f *lastAppliedUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	return f.fieldManager.Update(liveObj, newObj, managed, manager)
}

// server-side apply managed fields
func (f *lastAppliedUpdater) Apply(liveObj, newObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
	liveObj, managed, err := f.fieldManager.Apply(liveObj, newObj, managed, manager, force)
	if err != nil {
		return liveObj, managed, err
	}

	// Sync the client-side apply annotation only from kubectl server-side apply.
	// To opt-out of this behavior, users may specify a different field manager.
	//
	// If the client-side apply annotation doesn't exist,
	// then continue because we have no annotation to update
	if manager == "kubectl" && hasLastApplied(liveObj) {
		lastAppliedValue, err := buildLastApplied(newObj)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to build last-applied annotation: %v", err)
		}
		err = SetLastApplied(liveObj, lastAppliedValue)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to set last-applied annotation: %v", err)
		}
	}
	return liveObj, managed, err
}

func hasLastApplied(obj runtime.Object) bool {
	var accessor, err = meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}
	var annotations = accessor.GetAnnotations()
	if annotations == nil {
		return false
	}
	lastApplied, ok := annotations[LastAppliedConfigAnnotation]
	return ok && len(lastApplied) > 0
}

func buildLastApplied(obj runtime.Object) (string, error) {
	obj = obj.DeepCopyObject()

	var accessor, err = meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}

	// Remove the annotation from the object before encoding the object
	var annotations = accessor.GetAnnotations()
	delete(annotations, LastAppliedConfigAnnotation)
	accessor.SetAnnotations(annotations)

	lastApplied, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
	if err != nil {
		return "", fmt.Errorf("couldn't encode object into last applied annotation: %v", err)
	}
	return string(lastApplied), nil
}
248 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfields.go (generated, vendored, new file)

@@ -0,0 +1,248 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"encoding/json"
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type ManagedInterface interface {
	// Fields gets the fieldpath.ManagedFields.
	Fields() fieldpath.ManagedFields

	// Times gets the timestamps associated with each operation.
	Times() map[string]*metav1.Time
}

type managedStruct struct {
	fields fieldpath.ManagedFields
	times  map[string]*metav1.Time
}

var _ ManagedInterface = &managedStruct{}

// Fields implements ManagedInterface.
func (m *managedStruct) Fields() fieldpath.ManagedFields {
	return m.fields
}

// Times implements ManagedInterface.
func (m *managedStruct) Times() map[string]*metav1.Time {
	return m.times
}

// NewEmptyManaged creates an empty ManagedInterface.
func NewEmptyManaged() ManagedInterface {
	return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{})
}

// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation.
func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface {
	return &managedStruct{
		fields: f,
		times:  t,
	}
}

// RemoveObjectManagedFields removes the ManagedFields from the object
// before we merge so that it doesn't appear in the ManagedFields
// recursively.
func RemoveObjectManagedFields(obj runtime.Object) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}
	accessor.SetManagedFields(nil)
}

// EncodeObjectManagedFields converts and stores the fieldpath.ManagedFields into the object's ManagedFields
func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) error {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		panic(fmt.Sprintf("couldn't get accessor: %v", err))
	}

	encodedManagedFields, err := encodeManagedFields(managed)
	if err != nil {
		return fmt.Errorf("failed to convert back managed fields to API: %v", err)
	}
	accessor.SetManagedFields(encodedManagedFields)

	return nil
}

// DecodeManagedFields converts ManagedFields from the wire format (api format)
// to the format used by sigs.k8s.io/structured-merge-diff
func DecodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (ManagedInterface, error) {
	managed := managedStruct{}
	managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields))
	managed.times = make(map[string]*metav1.Time, len(encodedManagedFields))

	for i, encodedVersionedSet := range encodedManagedFields {
		switch encodedVersionedSet.Operation {
		case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate:
		default:
			return nil, fmt.Errorf("operation must be `Apply` or `Update`")
		}
		if len(encodedVersionedSet.APIVersion) < 1 {
			return nil, fmt.Errorf("apiVersion must not be empty")
		}
		switch encodedVersionedSet.FieldsType {
		case "FieldsV1":
			// Valid case.
		case "":
			return nil, fmt.Errorf("missing fieldsType in managed fields entry %d", i)
		default:
			return nil, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i)
		}
		manager, err := BuildManagerIdentifier(&encodedVersionedSet)
		if err != nil {
			return nil, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err)
		}
		managed.fields[manager], err = decodeVersionedSet(&encodedVersionedSet)
		if err != nil {
			return nil, fmt.Errorf("error decoding versioned set from %v: %v", encodedVersionedSet, err)
		}
		managed.times[manager] = encodedVersionedSet.Time
	}
	return &managed, nil
}

// BuildManagerIdentifier creates a manager identifier string from a ManagedFieldsEntry
func BuildManagerIdentifier(encodedManager *metav1.ManagedFieldsEntry) (manager string, err error) {
	encodedManagerCopy := *encodedManager

	// Never include fields type in the manager identifier
	encodedManagerCopy.FieldsType = ""

	// Never include the fields in the manager identifier
	encodedManagerCopy.FieldsV1 = nil

	// Never include the time in the manager identifier
	encodedManagerCopy.Time = nil

	// For appliers, don't include the APIVersion in the manager identifier,
	// so it will always have the same manager identifier each time it is applied.
	if encodedManager.Operation == metav1.ManagedFieldsOperationApply {
		encodedManagerCopy.APIVersion = ""
	}

	// Use the remaining fields to build the manager identifier
	b, err := json.Marshal(&encodedManagerCopy)
	if err != nil {
		return "", fmt.Errorf("error marshalling manager identifier: %v", err)
	}

	return string(b), nil
}

func decodeVersionedSet(encodedVersionedSet *metav1.ManagedFieldsEntry) (versionedSet fieldpath.VersionedSet, err error) {
	fields := EmptyFields
	if encodedVersionedSet.FieldsV1 != nil {
		fields = *encodedVersionedSet.FieldsV1
	}
	set, err := FieldsToSet(fields)
	if err != nil {
		return nil, fmt.Errorf("error decoding set: %v", err)
	}
	return fieldpath.NewVersionedSet(&set, fieldpath.APIVersion(encodedVersionedSet.APIVersion), encodedVersionedSet.Operation == metav1.ManagedFieldsOperationApply), nil
}

// encodeManagedFields converts ManagedFields from the format used by
// sigs.k8s.io/structured-merge-diff to the wire format (api format)
func encodeManagedFields(managed ManagedInterface) (encodedManagedFields []metav1.ManagedFieldsEntry, err error) {
	if len(managed.Fields()) == 0 {
		return nil, nil
	}
	encodedManagedFields = []metav1.ManagedFieldsEntry{}
	for manager := range managed.Fields() {
		versionedSet := managed.Fields()[manager]
		v, err := encodeManagerVersionedSet(manager, versionedSet)
		if err != nil {
			return nil, fmt.Errorf("error encoding versioned set for %v: %v", manager, err)
		}
		if t, ok := managed.Times()[manager]; ok {
			v.Time = t
		}
		encodedManagedFields = append(encodedManagedFields, *v)
	}
	return sortEncodedManagedFields(encodedManagedFields)
}

func sortEncodedManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (sortedManagedFields []metav1.ManagedFieldsEntry, err error) {
	sort.Slice(encodedManagedFields, func(i, j int) bool {
		p, q := encodedManagedFields[i], encodedManagedFields[j]

		if p.Operation != q.Operation {
			return p.Operation < q.Operation
		}

		pSeconds, qSeconds := int64(0), int64(0)
		if p.Time != nil {
			pSeconds = p.Time.Unix()
		}
		if q.Time != nil {
			qSeconds = q.Time.Unix()
		}
		if pSeconds != qSeconds {
			return pSeconds < qSeconds
		}

		if p.Manager != q.Manager {
			return p.Manager < q.Manager
		}

		if p.APIVersion != q.APIVersion {
			return p.APIVersion < q.APIVersion
		}
		return p.Subresource < q.Subresource
	})

	return encodedManagedFields, nil
}

func encodeManagerVersionedSet(manager string, versionedSet fieldpath.VersionedSet) (encodedVersionedSet *metav1.ManagedFieldsEntry, err error) {
	encodedVersionedSet = &metav1.ManagedFieldsEntry{}

	// Get as many fields as we can from the manager identifier
	err = json.Unmarshal([]byte(manager), encodedVersionedSet)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling manager identifier %v: %v", manager, err)
	}

	// Get the APIVersion, Operation, and Fields from the VersionedSet
	encodedVersionedSet.APIVersion = string(versionedSet.APIVersion())
	if versionedSet.Applied() {
		encodedVersionedSet.Operation = metav1.ManagedFieldsOperationApply
	}
	encodedVersionedSet.FieldsType = "FieldsV1"
	fields, err := SetToFields(*versionedSet.Set())
	if err != nil {
		return nil, fmt.Errorf("error encoding set: %v", err)
	}
	encodedVersionedSet.FieldsV1 = &fields

	return encodedVersionedSet, nil
}
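Aside: a sketch (not part of the commit) of DecodeManagedFields turning a wire-format entry into the structured-merge-diff representation; the entry values are illustrative.

entries := []metav1.ManagedFieldsEntry{{
	Manager:    "deployment-controller",
	Operation:  metav1.ManagedFieldsOperationUpdate,
	APIVersion: "apps/v1",
	FieldsType: "FieldsV1",
	FieldsV1:   &metav1.FieldsV1{Raw: []byte(`{"f:spec":{"f:replicas":{}}}`)},
}}

managed, err := DecodeManagedFields(entries)
if err != nil {
	panic(err)
}
for id, vs := range managed.Fields() {
	// id is the JSON identifier built by BuildManagerIdentifier;
	// vs is the fieldpath.VersionedSet holding .spec.replicas at apps/v1.
	fmt.Println(id, vs.APIVersion(), vs.Applied())
}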
82
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
generated
vendored
Normal file
82
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
)
|
||||
|
||||
type managedFieldsUpdater struct {
|
||||
fieldManager Manager
|
||||
}
|
||||
|
||||
var _ Manager = &managedFieldsUpdater{}
|
||||
|
||||
// NewManagedFieldsUpdater is responsible for updating the managedfields
|
||||
// in the object, updating the time of the operation as necessary. For
|
||||
// updates, it uses a hard-coded manager to detect if things have
|
||||
// changed, and swaps back the correct manager after the operation is
|
||||
// done.
|
||||
func NewManagedFieldsUpdater(fieldManager Manager) Manager {
|
||||
return &managedFieldsUpdater{
|
||||
fieldManager: fieldManager,
|
||||
}
|
||||
}
|
||||
|
||||
// Update implements Manager.
|
||||
func (f *managedFieldsUpdater) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	self := "current-operation"
	object, managed, err := f.fieldManager.Update(liveObj, newObj, managed, self)
	if err != nil {
		return object, managed, err
	}

	// If the current operation took any fields from anything, it means the object changed,
	// so update the timestamp of the managedFieldsEntry and merge with any previous updates from the same manager
	if vs, ok := managed.Fields()[self]; ok {
		delete(managed.Fields(), self)

		if previous, ok := managed.Fields()[manager]; ok {
			managed.Fields()[manager] = fieldpath.NewVersionedSet(vs.Set().Union(previous.Set()), vs.APIVersion(), vs.Applied())
		} else {
			managed.Fields()[manager] = vs
		}

		managed.Times()[manager] = &metav1.Time{Time: time.Now().UTC()}
	}

	return object, managed, nil
}

// Apply implements Manager.
func (f *managedFieldsUpdater) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
	object, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
	if err != nil {
		return object, managed, err
	}
	if object != nil {
		managed.Times()[fieldManager] = &metav1.Time{Time: time.Now().UTC()}
	} else {
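		// A nil object from the wrapped Apply means the request changed
		// nothing; return a copy of the live object with its managedFields
		// stripped instead.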
		object = liveObj.DeepCopyObject()
		RemoveObjectManagedFields(object)
	}
	return object, managed, nil
}
52
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/manager.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type Managed interface {
	// Fields gets the fieldpath.ManagedFields.
	Fields() fieldpath.ManagedFields

	// Times gets the timestamps associated with each operation.
	Times() map[string]*metav1.Time
}

// Manager updates the managed fields and merges applied configurations.
type Manager interface {
	// Update is used when the object has already been merged (non-apply
	// use-case), and simply updates the managed fields in the output
	// object.
	// * `liveObj` is not mutated by this function
	// * `newObj` may be mutated by this function
	// Returns the new object with managedFields removed, and the object's new
	// proposed managedFields separately.
	Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error)

	// Apply is used when server-side apply is called, as it merges the
	// object and updates the managed fields.
	// * `liveObj` is not mutated by this function
	// * `newObj` may be mutated by this function
	// Returns the new object with managedFields removed, and the object's new
	// proposed managedFields separately.
	Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error)
}
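
// Implementations in this package compose as decorators: for example, a
// stripMetaManager or managedFieldsUpdater wraps another Manager, delegates
// to it, and refines the managed fields it returns.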
140
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/pathelement.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"

	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

const (
	// Field indicates that the content of this path element is a field's name
	Field = "f"

	// Value indicates that the content of this path element is a field's value
	Value = "v"

	// Index indicates that the content of this path element is an index in an array
	Index = "i"

	// Key indicates that the content of this path element is a key value map
	Key = "k"

	// Separator separates the type of a path element from the contents
	Separator = ":"
)
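
// Serialized path elements therefore look like, for example:
//   f:metadata       (a field name)
//   v:"some-value"   (a JSON-encoded value keying an associative list)
//   i:3              (a list index)
//   k:{"name":"c"}   (a map of the key fields identifying a list item)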

// NewPathElement parses a serialized path element
func NewPathElement(s string) (fieldpath.PathElement, error) {
	split := strings.SplitN(s, Separator, 2)
	if len(split) < 2 {
		return fieldpath.PathElement{}, fmt.Errorf("missing colon: %v", s)
	}
	switch split[0] {
	case Field:
		return fieldpath.PathElement{
			FieldName: &split[1],
		}, nil
	case Value:
		val, err := value.FromJSON([]byte(split[1]))
		if err != nil {
			return fieldpath.PathElement{}, err
		}
		return fieldpath.PathElement{
			Value: &val,
		}, nil
	case Index:
		i, err := strconv.Atoi(split[1])
		if err != nil {
			return fieldpath.PathElement{}, err
		}
		return fieldpath.PathElement{
			Index: &i,
		}, nil
	case Key:
		kv := map[string]json.RawMessage{}
		err := json.Unmarshal([]byte(split[1]), &kv)
		if err != nil {
			return fieldpath.PathElement{}, err
		}
		fields := value.FieldList{}
		for k, v := range kv {
			b, err := json.Marshal(v)
			if err != nil {
				return fieldpath.PathElement{}, err
			}
			val, err := value.FromJSON(b)
			if err != nil {
				return fieldpath.PathElement{}, err
			}

			fields = append(fields, value.Field{
				Name:  k,
				Value: val,
			})
		}
		return fieldpath.PathElement{
			Key: &fields,
		}, nil
	default:
		// Ignore unknown key types
		return fieldpath.PathElement{}, nil
	}
}

// PathElementString serializes a path element
func PathElementString(pe fieldpath.PathElement) (string, error) {
	switch {
	case pe.FieldName != nil:
		return Field + Separator + *pe.FieldName, nil
	case pe.Key != nil:
		kv := map[string]json.RawMessage{}
		for _, k := range *pe.Key {
			b, err := value.ToJSON(k.Value)
			if err != nil {
				return "", err
			}
			m := json.RawMessage{}
			err = json.Unmarshal(b, &m)
			if err != nil {
				return "", err
			}
			kv[k.Name] = m
		}
		b, err := json.Marshal(kv)
		if err != nil {
			return "", err
		}
		return Key + ":" + string(b), nil
	case pe.Value != nil:
		b, err := value.ToJSON(*pe.Value)
		if err != nil {
			return "", err
		}
		return Value + ":" + string(b), nil
	case pe.Index != nil:
		return Index + ":" + strconv.Itoa(*pe.Index), nil
	default:
		return "", errors.New("Invalid type of path element")
	}
}
91
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"
	"math/rand"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type skipNonAppliedManager struct {
	fieldManager           Manager
	objectCreater          runtime.ObjectCreater
	gvk                    schema.GroupVersionKind
	beforeApplyManagerName string
	probability            float32
}

var _ Manager = &skipNonAppliedManager{}

// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply.
func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager {
	return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0)
}

// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply,
// or starts tracking on create with p probability.
func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager {
	return &skipNonAppliedManager{
		fieldManager:           fieldManager,
		objectCreater:          objectCreater,
		gvk:                    gvk,
		beforeApplyManagerName: "before-first-apply",
		probability:            p,
	}
}

// Update implements Manager.
func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	accessor, err := meta.Accessor(liveObj)
	if err != nil {
		return newObj, managed, nil
	}

	// If managed fields is empty, we need to determine whether to skip tracking managed fields.
	if len(managed.Fields()) == 0 {
		// Check if the operation is a create, by checking whether liveObj's UID is empty.
		// If the operation is create, P(tracking managed fields) = f.probability
		// If the operation is update, skip tracking managed fields, since we already know managed fields is empty.
		if len(accessor.GetUID()) == 0 {
			if f.probability <= rand.Float32() {
				return newObj, managed, nil
			}
		} else {
			return newObj, managed, nil
		}
	}
	return f.fieldManager.Update(liveObj, newObj, managed, manager)
}

// Apply implements Manager.
func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
	if len(managed.Fields()) == 0 {
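		// Nothing has been applied yet: attribute every field already on the
		// live object to a synthetic "before-first-apply" manager, so that a
		// subsequent apply can detect conflicts against pre-existing fields.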
		emptyObj, err := f.objectCreater.New(f.gvk)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err)
		}
		liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err)
		}
	}
	return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
}
90
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/stripmeta.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

type stripMetaManager struct {
	fieldManager Manager

	// stripSet is the list of fields that should never be part of a managedFields.
	stripSet *fieldpath.Set
}

var _ Manager = &stripMetaManager{}

// NewStripMetaManager creates a new Manager that strips metadata and typemeta fields from the manager's fieldset.
func NewStripMetaManager(fieldManager Manager) Manager {
	return &stripMetaManager{
		fieldManager: fieldManager,
		stripSet: fieldpath.NewSet(
			fieldpath.MakePathOrDie("apiVersion"),
			fieldpath.MakePathOrDie("kind"),
			fieldpath.MakePathOrDie("metadata"),
			fieldpath.MakePathOrDie("metadata", "name"),
			fieldpath.MakePathOrDie("metadata", "namespace"),
			fieldpath.MakePathOrDie("metadata", "creationTimestamp"),
			fieldpath.MakePathOrDie("metadata", "selfLink"),
			fieldpath.MakePathOrDie("metadata", "uid"),
			fieldpath.MakePathOrDie("metadata", "clusterName"),
			fieldpath.MakePathOrDie("metadata", "generation"),
			fieldpath.MakePathOrDie("metadata", "managedFields"),
			fieldpath.MakePathOrDie("metadata", "resourceVersion"),
		),
	}
}

// Update implements Manager.
func (f *stripMetaManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
	newObj, managed, err := f.fieldManager.Update(liveObj, newObj, managed, manager)
	if err != nil {
		return nil, nil, err
	}
	f.stripFields(managed.Fields(), manager)
	return newObj, managed, nil
}

// Apply implements Manager.
func (f *stripMetaManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
	newObj, managed, err := f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
	if err != nil {
		return nil, nil, err
	}
	f.stripFields(managed.Fields(), manager)
	return newObj, managed, nil
}

// stripFields removes a predefined set of paths found in typed from managed
func (f *stripMetaManager) stripFields(managed fieldpath.ManagedFields, manager string) {
	vs, ok := managed[manager]
	if ok {
		if vs == nil {
			panic(fmt.Sprintf("Found unexpected nil manager which should never happen: %s", manager))
		}
		newSet := vs.Set().Difference(f.stripSet)
		if newSet.Empty() {
			delete(managed, manager)
		} else {
			managed[manager] = fieldpath.NewVersionedSet(newSet, vs.APIVersion(), vs.Applied())
		}
	}
}
183
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

type structuredMergeManager struct {
	typeConverter   TypeConverter
	objectConverter runtime.ObjectConvertor
	objectDefaulter runtime.ObjectDefaulter
	groupVersion    schema.GroupVersion
	hubVersion      schema.GroupVersion
	updater         merge.Updater
}

var _ Manager = &structuredMergeManager{}

// NewStructuredMergeManager creates a new Manager that merges apply requests
// and updates managed fields for other types of requests.
func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) {
	return &structuredMergeManager{
		typeConverter:   typeConverter,
		objectConverter: objectConverter,
		objectDefaulter: objectDefaulter,
		groupVersion:    gv,
		hubVersion:      hub,
		updater: merge.Updater{
			Converter:     newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
			IgnoredFields: resetFields,
		},
	}, nil
}

// NewCRDStructuredMergeManager creates a new Manager specifically for
// CRDs. This allows for the possibility of fields which are not defined
// in models, as well as having no models defined at all.
func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) {
	return &structuredMergeManager{
		typeConverter:   typeConverter,
		objectConverter: objectConverter,
		objectDefaulter: objectDefaulter,
		groupVersion:    gv,
		hubVersion:      hub,
		updater: merge.Updater{
			Converter:     newCRDVersionConverter(typeConverter, objectConverter, hub),
			IgnoredFields: resetFields,
		},
	}, nil
}

func objectGVKNN(obj runtime.Object) string {
	name := "<unknown>"
	namespace := "<unknown>"
	if accessor, err := meta.Accessor(obj); err == nil {
		name = accessor.GetName()
		namespace = accessor.GetNamespace()
	}

	return fmt.Sprintf("%v/%v; %v", namespace, name, obj.GetObjectKind().GroupVersionKind())
}

// Update implements Manager.
func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
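	// Both objects are first converted to the request version and then to
	// typed values, so structured-merge-diff can compare them field by field.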
	newObjVersioned, err := f.toVersioned(newObj)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version (%v): %v", objectGVKNN(newObj), f.groupVersion, err)
	}
	liveObjVersioned, err := f.toVersioned(liveObj)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err)
	}
	newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err)
	}
	liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err)
	}
	apiVersion := fieldpath.APIVersion(f.groupVersion.String())

	// TODO(apelisse) use the first return value when unions are implemented
	_, managedFields, err := f.updater.Update(liveObjTyped, newObjTyped, apiVersion, managed.Fields(), manager)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to update ManagedFields (%v): %v", objectGVKNN(newObjVersioned), err)
	}
	managed = NewManaged(managedFields, managed.Times())

	return newObj, managed, nil
}

// Apply implements Manager.
func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed Managed, manager string, force bool) (runtime.Object, Managed, error) {
	// Check that the patch object has the same version as the live object
	if patchVersion := patchObj.GetObjectKind().GroupVersionKind().GroupVersion(); patchVersion != f.groupVersion {
		return nil, nil,
			errors.NewBadRequest(
				fmt.Sprintf("Incorrect version specified in apply patch. "+
					"Specified patch version: %s, expected: %s",
					patchVersion, f.groupVersion))
	}

	patchObjMeta, err := meta.Accessor(patchObj)
	if err != nil {
		return nil, nil, fmt.Errorf("couldn't get accessor: %v", err)
	}
	if patchObjMeta.GetManagedFields() != nil {
		return nil, nil, errors.NewBadRequest("metadata.managedFields must be nil")
	}

	liveObjVersioned, err := f.toVersioned(liveObj)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err)
	}

	patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err)
	}
	liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err)
	}

	apiVersion := fieldpath.APIVersion(f.groupVersion.String())
	newObjTyped, managedFields, err := f.updater.Apply(liveObjTyped, patchObjTyped, apiVersion, managed.Fields(), manager, force)
	if err != nil {
		return nil, nil, err
	}
	managed = NewManaged(managedFields, managed.Times())

	if newObjTyped == nil {
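		// A nil typed object from the updater means the apply changed
		// nothing; only the (possibly updated) managed fields are returned.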
		return nil, managed, nil
	}

	newObj, err := f.typeConverter.TypedToObject(newObjTyped)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert new typed object (%v) to object: %v", objectGVKNN(patchObj), err)
	}

	newObjVersioned, err := f.toVersioned(newObj)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert new object (%v) to proper version: %v", objectGVKNN(patchObj), err)
	}
	f.objectDefaulter.Default(newObjVersioned)

	newObjUnversioned, err := f.toUnversioned(newObjVersioned)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to convert to unversioned (%v): %v", objectGVKNN(patchObj), err)
	}
	return newObjUnversioned, managed, nil
}

func (f *structuredMergeManager) toVersioned(obj runtime.Object) (runtime.Object, error) {
	return f.objectConverter.ConvertToVersion(obj, f.groupVersion)
}

func (f *structuredMergeManager) toUnversioned(obj runtime.Object) (runtime.Object, error) {
	return f.objectConverter.ConvertToVersion(obj, f.hubVersion)
}
193
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go
generated
vendored
Normal file
@ -0,0 +1,193 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kube-openapi/pkg/schemaconv"
	"k8s.io/kube-openapi/pkg/validation/spec"
	smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

// TypeConverter allows you to convert from runtime.Object to
// typed.TypedValue and the other way around.
type TypeConverter interface {
	ObjectToTyped(runtime.Object) (*typed.TypedValue, error)
	TypedToObject(*typed.TypedValue) (runtime.Object, error)
}

type typeConverter struct {
	parser map[schema.GroupVersionKind]*typed.ParseableType
}

var _ TypeConverter = &typeConverter{}

func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) {
	typeSchema, err := schemaconv.ToSchemaFromOpenAPI(openapiSpec, preserveUnknownFields)
	if err != nil {
		return nil, fmt.Errorf("failed to convert models to schema: %v", err)
	}

	typeParser := typed.Parser{Schema: smdschema.Schema{Types: typeSchema.Types}}
	tr := indexModels(&typeParser, openapiSpec)

	return &typeConverter{parser: tr}, nil
}

func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
	gvk := obj.GetObjectKind().GroupVersionKind()
	t := c.parser[gvk]
	if t == nil {
		return nil, NewNoCorrespondingTypeError(gvk)
	}
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		return t.FromUnstructured(o.UnstructuredContent())
	default:
		return t.FromStructured(obj)
	}
}

func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
	return valueToObject(value.AsValue())
}

type deducedTypeConverter struct{}

// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't
// have a schema. It does implement the same interface though (and
// create the same types of objects), so that everything can still work
// the same. CRDs are merged with all their fields being "atomic" (lists
// included).
func NewDeducedTypeConverter() TypeConverter {
	return deducedTypeConverter{}
}

// ObjectToTyped converts an object into a TypedValue with a "deduced type".
func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
	switch o := obj.(type) {
	case *unstructured.Unstructured:
		return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent())
	default:
		return typed.DeducedParseableType.FromStructured(obj)
	}
}

// TypedToObject transforms the typed value into a runtime.Object. That
// is not specific to deduced type.
func (deducedTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
	return valueToObject(value.AsValue())
}

func valueToObject(val value.Value) (runtime.Object, error) {
	vu := val.Unstructured()
	switch o := vu.(type) {
	case map[string]interface{}:
		return &unstructured.Unstructured{Object: o}, nil
	default:
		return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
	}
}
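
// indexModels maps each GroupVersionKind declared in a model's
// "x-kubernetes-group-version-kind" extension to that model's parsed type.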
func indexModels(
	typeParser *typed.Parser,
	openAPISchemas map[string]*spec.Schema,
) map[schema.GroupVersionKind]*typed.ParseableType {
	tr := map[schema.GroupVersionKind]*typed.ParseableType{}
	for modelName, model := range openAPISchemas {
		gvkList := parseGroupVersionKind(model.Extensions)
		if len(gvkList) == 0 {
			continue
		}

		parsedType := typeParser.Type(modelName)
		for _, gvk := range gvkList {
			if len(gvk.Kind) > 0 {
				tr[schema.GroupVersionKind(gvk)] = &parsedType
			}
		}
	}
	return tr
}

// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one.
func parseGroupVersionKind(extensions map[string]interface{}) []schema.GroupVersionKind {
	gvkListResult := []schema.GroupVersionKind{}

	// Get the extensions
	gvkExtension, ok := extensions["x-kubernetes-group-version-kind"]
	if !ok {
		return []schema.GroupVersionKind{}
	}

	// gvk extension must be a list of at least 1 element.
	gvkList, ok := gvkExtension.([]interface{})
	if !ok {
		return []schema.GroupVersionKind{}
	}

	for _, gvk := range gvkList {
		var group, version, kind string

		// gvk extension list must be a map with group, version, and
		// kind fields
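		// (the map may be keyed by interface{} or by string, depending on
		// how the extension was decoded; YAML decoders commonly produce
		// interface{} keys while JSON decoders produce string keys)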
		if gvkMap, ok := gvk.(map[interface{}]interface{}); ok {
			group, ok = gvkMap["group"].(string)
			if !ok {
				continue
			}
			version, ok = gvkMap["version"].(string)
			if !ok {
				continue
			}
			kind, ok = gvkMap["kind"].(string)
			if !ok {
				continue
			}

		} else if gvkMap, ok := gvk.(map[string]interface{}); ok {
			group, ok = gvkMap["group"].(string)
			if !ok {
				continue
			}
			version, ok = gvkMap["version"].(string)
			if !ok {
				continue
			}
			kind, ok = gvkMap["kind"].(string)
			if !ok {
				continue
			}
		} else {
			continue
		}

		gvkListResult = append(gvkListResult, schema.GroupVersionKind{
			Group:   group,
			Version: version,
			Kind:    kind,
		})
	}

	return gvkListResult
}
123
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versionconverter.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// versionConverter is an implementation of
// sigs.k8s.io/structured-merge-diff/merge.Converter
type versionConverter struct {
	typeConverter   TypeConverter
	objectConvertor runtime.ObjectConvertor
	hubGetter       func(from schema.GroupVersion) schema.GroupVersion
}

var _ merge.Converter = &versionConverter{}

// newVersionConverter builds a VersionConverter from a TypeConverter and an ObjectConvertor.
func newVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	return &versionConverter{
		typeConverter:   t,
		objectConvertor: o,
		hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
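			// Keep the object's own group but pivot through the hub's
			// version, so objects of any group convert via a common
			// intermediate representation.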
			return schema.GroupVersion{
				Group:   from.Group,
				Version: h.Version,
			}
		},
	}
}

// newCRDVersionConverter builds a VersionConverter for CRDs from a TypeConverter and an ObjectConvertor.
func newCRDVersionConverter(t TypeConverter, o runtime.ObjectConvertor, h schema.GroupVersion) merge.Converter {
	return &versionConverter{
		typeConverter:   t,
		objectConvertor: o,
		hubGetter: func(from schema.GroupVersion) schema.GroupVersion {
			return h
		},
	}
}

// Convert implements sigs.k8s.io/structured-merge-diff/merge.Converter
func (v *versionConverter) Convert(object *typed.TypedValue, version fieldpath.APIVersion) (*typed.TypedValue, error) {
	// Convert the smd typed value to a kubernetes object.
	objectToConvert, err := v.typeConverter.TypedToObject(object)
	if err != nil {
		return object, err
	}

	// Parse the target groupVersion.
	groupVersion, err := schema.ParseGroupVersion(string(version))
	if err != nil {
		return object, err
	}

	// If attempting to convert to the same version as we already have, just return it.
	fromVersion := objectToConvert.GetObjectKind().GroupVersionKind().GroupVersion()
	if fromVersion == groupVersion {
		return object, nil
	}

	// Convert to internal
	internalObject, err := v.objectConvertor.ConvertToVersion(objectToConvert, v.hubGetter(fromVersion))
	if err != nil {
		return object, err
	}

	// Convert the object into the target version
	convertedObject, err := v.objectConvertor.ConvertToVersion(internalObject, groupVersion)
	if err != nil {
		return object, err
	}

	// Convert the object back to a smd typed value and return it.
	return v.typeConverter.ObjectToTyped(convertedObject)
}

// IsMissingVersionError implements sigs.k8s.io/structured-merge-diff/merge.Converter
func (v *versionConverter) IsMissingVersionError(err error) bool {
	return runtime.IsNotRegisteredError(err) || isNoCorrespondingTypeError(err)
}

type noCorrespondingTypeErr struct {
	gvk schema.GroupVersionKind
}

func NewNoCorrespondingTypeError(gvk schema.GroupVersionKind) error {
	return &noCorrespondingTypeErr{gvk: gvk}
}

func (k *noCorrespondingTypeErr) Error() string {
	return fmt.Sprintf("no corresponding type for %v", k.gvk)
}

func isNoCorrespondingTypeError(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(*noCorrespondingTypeErr)
	return ok
}
261
vendor/k8s.io/apimachinery/pkg/util/managedfields/node.yaml
generated
vendored
Normal file
@ -0,0 +1,261 @@
apiVersion: v1
kind: Node
metadata:
  annotations:
    container.googleapis.com/instance_id: "123456789321654789"
    node.alpha.kubernetes.io/ttl: "0"
    volumes.kubernetes.io/controller-managed-attach-detach: "true"
  creationTimestamp: "2019-07-09T16:17:29Z"
  labels:
    kubernetes.io/arch: amd64
    beta.kubernetes.io/fluentd-ds-ready: "true"
    beta.kubernetes.io/instance-type: n1-standard-4
    kubernetes.io/os: linux
    cloud.google.com/gke-nodepool: default-pool
    cloud.google.com/gke-os-distribution: cos
    failure-domain.beta.kubernetes.io/region: us-central1
    failure-domain.beta.kubernetes.io/zone: us-central1-b
    topology.kubernetes.io/region: us-central1
    topology.kubernetes.io/zone: us-central1-b
    kubernetes.io/hostname: node-default-pool-something
  name: node-default-pool-something
  resourceVersion: "211582541"
  selfLink: /api/v1/nodes/node-default-pool-something
  uid: 0c24d0e1-a265-11e9-abe4-42010a80026b
spec:
  podCIDR: 10.0.0.1/24
  providerID: some-provider-id-of-some-sort
status:
  addresses:
  - address: 10.0.0.1
    type: InternalIP
  - address: 192.168.0.1
    type: ExternalIP
  - address: node-default-pool-something
    type: Hostname
  allocatable:
    cpu: 3920m
    ephemeral-storage: "104638878617"
    hugepages-2Mi: "0"
    memory: 12700100Ki
    pods: "110"
  capacity:
    cpu: "4"
    ephemeral-storage: 202086868Ki
    hugepages-2Mi: "0"
    memory: 15399364Ki
    pods: "110"
  conditions:
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:22:08Z"
    message: containerd is functioning properly
    reason: FrequentContainerdRestart
    status: "False"
    type: FrequentContainerdRestart
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:22:06Z"
    message: docker overlay2 is functioning properly
    reason: CorruptDockerOverlay2
    status: "False"
    type: CorruptDockerOverlay2
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:22:06Z"
    message: node is functioning properly
    reason: UnregisterNetDevice
    status: "False"
    type: FrequentUnregisterNetDevice
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:17:04Z"
    message: kernel has no deadlock
    reason: KernelHasNoDeadlock
    status: "False"
    type: KernelDeadlock
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:17:04Z"
    message: Filesystem is not read-only
    reason: FilesystemIsNotReadOnly
    status: "False"
    type: ReadonlyFilesystem
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:22:05Z"
    message: kubelet is functioning properly
    reason: FrequentKubeletRestart
    status: "False"
    type: FrequentKubeletRestart
  - lastHeartbeatTime: "2019-09-20T19:32:08Z"
    lastTransitionTime: "2019-07-09T16:22:06Z"
    message: docker is functioning properly
    reason: FrequentDockerRestart
    status: "False"
    type: FrequentDockerRestart
  - lastHeartbeatTime: "2019-07-09T16:17:47Z"
    lastTransitionTime: "2019-07-09T16:17:47Z"
    message: RouteController created a route
    reason: RouteCreated
    status: "False"
    type: NetworkUnavailable
  - lastHeartbeatTime: "2019-09-20T19:32:50Z"
    lastTransitionTime: "2019-07-09T16:17:29Z"
    message: kubelet has sufficient disk space available
    reason: KubeletHasSufficientDisk
    status: "False"
    type: OutOfDisk
  - lastHeartbeatTime: "2019-09-20T19:32:50Z"
    lastTransitionTime: "2019-07-09T16:17:29Z"
    message: kubelet has sufficient memory available
    reason: KubeletHasSufficientMemory
    status: "False"
    type: MemoryPressure
  - lastHeartbeatTime: "2019-09-20T19:32:50Z"
    lastTransitionTime: "2019-07-09T16:17:29Z"
    message: kubelet has no disk pressure
    reason: KubeletHasNoDiskPressure
    status: "False"
    type: DiskPressure
  - lastHeartbeatTime: "2019-09-20T19:32:50Z"
    lastTransitionTime: "2019-07-09T16:17:29Z"
    message: kubelet has sufficient PID available
    reason: KubeletHasSufficientPID
    status: "False"
    type: PIDPressure
  - lastHeartbeatTime: "2019-09-20T19:32:50Z"
    lastTransitionTime: "2019-07-09T16:17:49Z"
    message: kubelet is posting ready status. AppArmor enabled
    reason: KubeletReady
    status: "True"
    type: Ready
  daemonEndpoints:
    kubeletEndpoint:
      Port: 10250
  images:
  - names:
    - grafana/grafana@sha256:80e5e113a984d74836aa16f5b4524012099436b1a50df293f00ac6377fb512c8
    - grafana/grafana:4.4.2
    sizeBytes: 287008013
  - names:
    - registry.k8s.io/node-problem-detector@sha256:f95cab985c26b2f46e9bd43283e0bfa88860c14e0fb0649266babe8b65e9eb2b
    - registry.k8s.io/node-problem-detector:v0.4.1
    sizeBytes: 286572743
  - names:
    - grafana/grafana@sha256:7ff7f9b2501a5d55b55ce3f58d21771b1c5af1f2a4ab7dbf11bef7142aae7033
    - grafana/grafana:4.2.0
    sizeBytes: 277940263
  - names:
    - influxdb@sha256:7dddf03376348876ed4bdf33d6dfa3326f45a2bae0930dbd80781a374eb519bc
    - influxdb:1.2.2
    sizeBytes: 223948571
  - names:
    - gcr.io/stackdriver-agents/stackdriver-logging-agent@sha256:f8d5231b67b9c53f60068b535a11811d29d1b3efd53d2b79f2a2591ea338e4f2
    - gcr.io/stackdriver-agents/stackdriver-logging-agent:0.6-1.6.0-1
    sizeBytes: 223242132
  - names:
    - nginx@sha256:35779791c05d119df4fe476db8f47c0bee5943c83eba5656a15fc046db48178b
    - nginx:1.10.1
    sizeBytes: 180708613
  - names:
    - registry.k8s.io/fluentd-elasticsearch@sha256:b8c94527b489fb61d3d81ce5ad7f3ddbb7be71e9620a3a36e2bede2f2e487d73
    - registry.k8s.io/fluentd-elasticsearch:v2.0.4
    sizeBytes: 135716379
  - names:
    - nginx@sha256:00be67d6ba53d5318cd91c57771530f5251cfbe028b7be2c4b70526f988cfc9f
    - nginx:latest
    sizeBytes: 109357355
  - names:
    - registry.k8s.io/kubernetes-dashboard-amd64@sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0
    - registry.k8s.io/kubernetes-dashboard-amd64:v1.8.3
    sizeBytes: 102319441
  - names:
    - gcr.io/google_containers/kube-proxy:v1.11.10-gke.5
    - registry.k8s.io/kube-proxy:v1.11.10-gke.5
    sizeBytes: 102279340
  - names:
    - registry.k8s.io/event-exporter@sha256:7f9cd7cb04d6959b0aa960727d04fa86759008048c785397b7b0d9dff0007516
    - registry.k8s.io/event-exporter:v0.2.3
    sizeBytes: 94171943
  - names:
    - registry.k8s.io/prometheus-to-sd@sha256:6c0c742475363d537ff059136e5d5e4ab1f512ee0fd9b7ca42ea48bc309d1662
    - registry.k8s.io/prometheus-to-sd:v0.3.1
    sizeBytes: 88077694
  - names:
    - registry.k8s.io/fluentd-gcp-scaler@sha256:a5ace7506d393c4ed65eb2cbb6312c64ab357fcea16dff76b9055bc6e498e5ff
    - registry.k8s.io/fluentd-gcp-scaler:0.5.1
    sizeBytes: 86637208
  - names:
    - registry.k8s.io/heapster-amd64@sha256:9fae0af136ce0cf4f88393b3670f7139ffc464692060c374d2ae748e13144521
    - registry.k8s.io/heapster-amd64:v1.6.0-beta.1
    sizeBytes: 76016169
  - names:
    - registry.k8s.io/ingress-glbc-amd64@sha256:31d36bbd9c44caffa135fc78cf0737266fcf25e3cf0cd1c2fcbfbc4f7309cc52
    - registry.k8s.io/ingress-glbc-amd64:v1.1.1
    sizeBytes: 67801919
  - names:
    - registry.k8s.io/kube-addon-manager@sha256:d53486c3a0b49ebee019932878dc44232735d5622a51dbbdcec7124199020d09
    - registry.k8s.io/kube-addon-manager:v8.7
    sizeBytes: 63322109
  - names:
    - nginx@sha256:4aacdcf186934dcb02f642579314075910f1855590fd3039d8fa4c9f96e48315
    - nginx:1.10-alpine
    sizeBytes: 54042627
  - names:
    - registry.k8s.io/cpvpa-amd64@sha256:cfe7b0a11c9c8e18c87b1eb34fef9a7cbb8480a8da11fc2657f78dbf4739f869
    - registry.k8s.io/cpvpa-amd64:v0.6.0
    sizeBytes: 51785854
  - names:
    - registry.k8s.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d
    - registry.k8s.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
    sizeBytes: 49648481
  - names:
    - registry.k8s.io/ip-masq-agent-amd64@sha256:1ffda57d87901bc01324c82ceb2145fe6a0448d3f0dd9cb65aa76a867cd62103
    - registry.k8s.io/ip-masq-agent-amd64:v2.1.1
    sizeBytes: 49612505
  - names:
    - registry.k8s.io/k8s-dns-kube-dns-amd64@sha256:b99fc3eee2a9f052f7eb4cc00f15eb12fc405fa41019baa2d6b79847ae7284a8
    - registry.k8s.io/k8s-dns-kube-dns-amd64:1.14.10
    sizeBytes: 49549457
  - names:
    - registry.k8s.io/rescheduler@sha256:156cfbfd05a5a815206fd2eeb6cbdaf1596d71ea4b415d3a6c43071dd7b99450
    - registry.k8s.io/rescheduler:v0.4.0
    sizeBytes: 48973149
  - names:
    - registry.k8s.io/event-exporter@sha256:16ca66e2b5dc7a1ce6a5aafcb21d0885828b75cdfc08135430480f7ad2364adc
    - registry.k8s.io/event-exporter:v0.2.4
    sizeBytes: 47261019
  - names:
    - registry.k8s.io/coredns@sha256:db2bf53126ed1c761d5a41f24a1b82a461c85f736ff6e90542e9522be4757848
    - registry.k8s.io/coredns:1.1.3
    sizeBytes: 45587362
  - names:
    - prom/prometheus@sha256:483f4c9d7733699ba79facca9f8bcce1cef1af43dfc3e7c5a1882aa85f53cb74
    - prom/prometheus:v1.1.3
    sizeBytes: 45493941
  nodeInfo:
    architecture: amd64
    bootID: a32eca78-4ad4-4b76-9252-f143d6c2ae61
    containerRuntimeVersion: docker://17.3.2
    kernelVersion: 4.14.127+
    kubeProxyVersion: v1.11.10-gke.5
    kubeletVersion: v1.11.10-gke.5
    machineID: 1739555e5b231057f0f9a0b5fa29511b
    operatingSystem: linux
    osImage: Container-Optimized OS from Google
    systemUUID: 1739555E-5B23-1057-F0F9-A0B5FA29511B
  volumesAttached:
  - devicePath: /dev/disk/by-id/b9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
    name: kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
  - devicePath: /dev/disk/by-id/b9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
    name: kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
  - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
    name: kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
  - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
    name: kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
  - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
    name: kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
  - devicePath: /dev/disk/by-id/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
    name: kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
  volumesInUse:
  - kubernetes.io/pd/some-random-clusterb9772-pvc-72e1c7f1-fd41-11e6-94d4-42010a800049
  - kubernetes.io/pd/some-random-clusterb9772-pvc-8895a852-fd42-11e6-94d4-42010a800049
  - kubernetes.io/pd/some-random-clusterb9772-pvc-8bf50554-fd42-11e6-94d4-42010a800049
  - kubernetes.io/pd/some-random-clusterb9772-pvc-8fb5e386-4641-11e7-a490-42010a800283
  - kubernetes.io/pd/some-random-clusterb9772-pvc-c2435a06-14d7-11e7-9baf-42010a800049
  - kubernetes.io/pd/some-random-clusterb9772-pvc-c787c67d-14d7-11e7-9baf-42010a800049
121
vendor/k8s.io/apimachinery/pkg/util/managedfields/pod.yaml
generated
vendored
Normal file
@ -0,0 +1,121 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: some-app
    plugin1: some-value
    plugin2: some-value
    plugin3: some-value
    plugin4: some-value
  name: some-name
  namespace: default
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: some-name
    uid: 0a9d2b9e-779e-11e7-b422-42010a8001be
spec:
  containers:
  - args:
    - one
    - two
    - three
    - four
    - five
    - six
    - seven
    - eight
    - nine
    env:
    - name: VAR_3
      valueFrom:
        secretKeyRef:
          key: some-other-key
          name: some-oher-name
    - name: VAR_2
      valueFrom:
        secretKeyRef:
          key: other-key
          name: other-name
    - name: VAR_1
      valueFrom:
        secretKeyRef:
          key: some-key
          name: some-name
    image: some-image-name
    imagePullPolicy: IfNotPresent
    name: some-name
    resources:
      requests:
        cpu: '0'
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-hu5jz
      readOnly: true
  dnsPolicy: ClusterFirst
  nodeName: node-name
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-hu5jz
    secret:
      defaultMode: 420
      secretName: default-token-hu5jz
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: '2019-07-08T09:31:18Z'
    status: 'True'
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: '2019-07-08T09:41:59Z'
    status: 'True'
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: null
    status: 'True'
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: '2019-07-08T09:31:18Z'
    status: 'True'
    type: PodScheduled
  containerStatuses:
  - containerID: docker://885e82a1ed0b7356541bb410a0126921ac42439607c09875cd8097dd5d7b5376
    image: some-image-name
    imageID: docker-pullable://some-image-id
    lastState:
      terminated:
        containerID: docker://d57290f9e00fad626b20d2dd87a3cf69bbc22edae07985374f86a8b2b4e39565
        exitCode: 255
        finishedAt: '2019-07-08T09:39:09Z'
        reason: Error
        startedAt: '2019-07-08T09:38:54Z'
    name: name
    ready: true
    restartCount: 6
    state:
      running:
        startedAt: '2019-07-08T09:41:59Z'
  hostIP: 10.0.0.1
  phase: Running
  podIP: 10.0.0.1
  qosClass: BestEffort
  startTime: '2019-07-08T09:31:18Z'
174
vendor/k8s.io/apimachinery/pkg/util/managedfields/scalehandler.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package managedfields

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields/internal"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

var (
	scaleGroupVersion   = schema.GroupVersion{Group: "autoscaling", Version: "v1"}
	replicasPathInScale = fieldpath.MakePathOrDie("spec", "replicas")
)

// ResourcePathMappings maps a group/version to its replicas path. The
// assumption is that all the paths correspond to leaf fields.
type ResourcePathMappings map[string]fieldpath.Path

// ScaleHandler manages the conversion of managed fields between a main
// resource and the scale subresource
type ScaleHandler struct {
	parentEntries []metav1.ManagedFieldsEntry
	groupVersion  schema.GroupVersion
	mappings      ResourcePathMappings
}

// NewScaleHandler creates a new ScaleHandler
func NewScaleHandler(parentEntries []metav1.ManagedFieldsEntry, groupVersion schema.GroupVersion, mappings ResourcePathMappings) *ScaleHandler {
	return &ScaleHandler{
		parentEntries: parentEntries,
		groupVersion:  groupVersion,
		mappings:      mappings,
	}
}

// ToSubresource filters the managed fields of the main resource and converts
// them so that they can be handled by scale.
// For the managed fields that have a replicas path it performs two changes:
//  1. APIVersion is changed to the APIVersion of the scale subresource
//  2. Replicas path of the main resource is transformed to the replicas path of
//     the scale subresource
func (h *ScaleHandler) ToSubresource() ([]metav1.ManagedFieldsEntry, error) {
	managed, err := internal.DecodeManagedFields(h.parentEntries)
	if err != nil {
		return nil, err
	}

	f := fieldpath.ManagedFields{}
	t := map[string]*metav1.Time{}
	for manager, versionedSet := range managed.Fields() {
		path, ok := h.mappings[string(versionedSet.APIVersion())]
		// Skip the entry if the APIVersion is unknown
		if !ok || path == nil {
			continue
		}

		if versionedSet.Set().Has(path) {
			newVersionedSet := fieldpath.NewVersionedSet(
				fieldpath.NewSet(replicasPathInScale),
				fieldpath.APIVersion(scaleGroupVersion.String()),
				versionedSet.Applied(),
			)

			f[manager] = newVersionedSet
			t[manager] = managed.Times()[manager]
		}
	}

	return managedFieldsEntries(internal.NewManaged(f, t))
}

// ToParent merges `scaleEntries` with the entries of the main resource and
// transforms them accordingly
func (h *ScaleHandler) ToParent(scaleEntries []metav1.ManagedFieldsEntry) ([]metav1.ManagedFieldsEntry, error) {
	decodedParentEntries, err := internal.DecodeManagedFields(h.parentEntries)
	if err != nil {
		return nil, err
	}
	parentFields := decodedParentEntries.Fields()

	decodedScaleEntries, err := internal.DecodeManagedFields(scaleEntries)
	if err != nil {
		return nil, err
	}
	scaleFields := decodedScaleEntries.Fields()

	f := fieldpath.ManagedFields{}
	t := map[string]*metav1.Time{}

	for manager, versionedSet := range parentFields {
		// Get the main resource "replicas" path
		path, ok := h.mappings[string(versionedSet.APIVersion())]
		// Drop the entry if the APIVersion is unknown.
		if !ok {
			continue
		}

		// If the parent entry does not have the replicas path or it is nil, just
		// keep it as it is. The path is nil for Custom Resources without scale
		// subresource.
		if path == nil || !versionedSet.Set().Has(path) {
			f[manager] = versionedSet
			t[manager] = decodedParentEntries.Times()[manager]
			continue
		}

		if _, ok := scaleFields[manager]; !ok {
			// "Steal" the replicas path from the main resource entry
			newSet := versionedSet.Set().Difference(fieldpath.NewSet(path))

			if !newSet.Empty() {
				newVersionedSet := fieldpath.NewVersionedSet(
					newSet,
					versionedSet.APIVersion(),
					versionedSet.Applied(),
				)
				f[manager] = newVersionedSet
				t[manager] = decodedParentEntries.Times()[manager]
			}
		} else {
			// Field wasn't stolen, let's keep the entry as it is.
			f[manager] = versionedSet
			t[manager] = decodedParentEntries.Times()[manager]
			delete(scaleFields, manager)
		}
	}

	for manager, versionedSet := range scaleFields {
|
||||
if !versionedSet.Set().Has(replicasPathInScale) {
|
||||
continue
|
||||
}
|
||||
newVersionedSet := fieldpath.NewVersionedSet(
|
||||
fieldpath.NewSet(h.mappings[h.groupVersion.String()]),
|
||||
fieldpath.APIVersion(h.groupVersion.String()),
|
||||
versionedSet.Applied(),
|
||||
)
|
||||
f[manager] = newVersionedSet
|
||||
t[manager] = decodedParentEntries.Times()[manager]
|
||||
}
|
||||
|
||||
return managedFieldsEntries(internal.NewManaged(f, t))
|
||||
}
|
||||
|
||||
func managedFieldsEntries(entries internal.ManagedInterface) ([]metav1.ManagedFieldsEntry, error) {
|
||||
obj := &unstructured.Unstructured{Object: map[string]interface{}{}}
|
||||
if err := internal.EncodeObjectManagedFields(obj, entries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("couldn't get accessor: %v", err))
|
||||
}
|
||||
return accessor.GetManagedFields(), nil
|
||||
}
|
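
Editor's note, for illustration only (not part of the vendored diff): a minimal sketch of how this new helper might be wired up for a Deployment-like parent. The mapping key, the replicas path, and the `deployment` object are assumptions.

    func scaleManagedFields(deployment metav1.Object) ([]metav1.ManagedFieldsEntry, error) {
        mappings := managedfields.ResourcePathMappings{
            "apps/v1": fieldpath.MakePathOrDie("spec", "replicas"),
        }
        handler := managedfields.NewScaleHandler(
            deployment.GetManagedFields(), // the parent object's managed-fields entries
            schema.GroupVersion{Group: "apps", Version: "v1"},
            mappings,
        )
        // Entries that own .spec.replicas come back rewritten against the
        // scale subresource's replicas path and APIVersion.
        return handler.ToSubresource()
    }
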
47
vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go
generated
vendored
Normal file
47
vendor/k8s.io/apimachinery/pkg/util/managedfields/typeconverter.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package managedfields

import (
    "k8s.io/apimachinery/pkg/util/managedfields/internal"
    "k8s.io/kube-openapi/pkg/validation/spec"
)

// TypeConverter allows you to convert from runtime.Object to
// typed.TypedValue and the other way around.
type TypeConverter = internal.TypeConverter
// NewDeducedTypeConverter creates a TypeConverter for CRDs that don't
// have a schema. It does implement the same interface though (and
// creates the same types of objects), so that everything can still work
// the same. CRDs are merged with all their fields being "atomic" (lists
// included).
func NewDeducedTypeConverter() TypeConverter {
    return internal.NewDeducedTypeConverter()
}
// NewTypeConverter builds a TypeConverter from a map of OpenAPIV3 schemas.
// This will automatically find the proper version of the object, and the
// corresponding schema information.
// The keys to the map must be consistent with the names
// used by Refs within the schemas.
// The schemas should conform to the Kubernetes Structural Schema OpenAPI
// restrictions found in docs:
// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema
func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields bool) (TypeConverter, error) {
    return internal.NewTypeConverter(openapiSpec, preserveUnknownFields)
}
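
Editor's sketch (assuming this vendored revision's TypeConverter interface exposes ObjectToTyped, as the internal package defines it): a deduced converter lets a schemaless CRD participate in merge logic, with every field treated as atomic. The CRD name and contents are hypothetical.

    func typedWidget() (*typed.TypedValue, error) {
        tc := managedfields.NewDeducedTypeConverter()
        obj := &unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "example.io/v1", // hypothetical CRD
            "kind":       "Widget",
            "spec":       map[string]interface{}{"size": int64(3)},
        }}
        // No schema is available, so the whole object merges atomically.
        return tc.ObjectToTyped(obj)
    }
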
3
vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
generated
vendored
3
vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
generated
vendored
@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) {
// supports JSON merge patch semantics.
//
// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts.
//
// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
func HasConflicts(left, right interface{}) (bool, error) {
    switch typedLeft := left.(type) {
    case map[string]interface{}:
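
Editor's sketch of the conflict semantics, with hypothetical values:

    left := map[string]interface{}{"image": "nginx:1.25", "replicas": int64(2)}
    right := map[string]interface{}{"image": "nginx:1.26"}
    conflict, err := mergepatch.HasConflicts(left, right)
    // conflict == true: both sides set "image" to different values.
    // "replicas" alone would not conflict, since only left sets it.
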
14
vendor/k8s.io/apimachinery/pkg/util/sets/set.go
generated
vendored
14
vendor/k8s.io/apimachinery/pkg/util/sets/set.go
generated
vendored
@ -64,6 +64,20 @@ func (s Set[T]) Delete(items ...T) Set[T] {
    return s
}

// Clear empties the set.
// It is preferable to replace the set with a newly constructed set,
// but not all callers can do that (when there are other references to the map).
// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN
// can't be cleared because NaN can't be removed.
// For sets containing items of a type that is reflexive for ==,
// this is optimized to a single call to runtime.mapclear().
func (s Set[T]) Clear() Set[T] {
    for key := range s {
        delete(s, key)
    }
    return s
}

// Has returns true if and only if item is contained in the set.
func (s Set[T]) Has(item T) bool {
    _, contained := s[item]
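
Editor's sketch of why in-place clearing matters when the underlying map is shared:

    s := sets.New[string]("a", "b")
    alias := s     // a second reference to the same underlying map
    s.Clear()      // empties the set in place
    alias.Has("a") // false: the aliased view observes the clearing too
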
1
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS
generated
vendored
1
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS
generated
vendored
@ -1,6 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
  - apelisse
  - pwittrock
reviewers:
  - apelisse
2
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
generated
vendored
2
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
generated
vendored
@ -1106,7 +1106,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me
// Then, sort them by the relative order in setElementOrder, patch list and live list.
// The precedence is $setElementOrder > order in patch list > order in live list.
// This function will delete the item after merging it to prevent processing it again in the future.
// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md
// Ref: https://git.k8s.io/design-proposals-archive/cli/preserve-order-in-strategic-merge-patch.md
func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error {
    for key, patchV := range patch {
        // Do nothing if there is no ordering directive
8
vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
generated
vendored
8
vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
generated
vendored
@ -191,7 +191,13 @@ func IsDNS1123Label(value string) []string {
        errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
    }
    if !dns1123LabelRegexp.MatchString(value) {
        errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
        if dns1123SubdomainRegexp.MatchString(value) {
            // It was a valid subdomain and not a valid label. Since we
            // already checked length, it must be dots.
            errs = append(errs, "must not contain dots")
        } else {
            errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
        }
    }
    return errs
}
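
Editor's sketch of the behavioral change: a value that is a valid DNS-1123 subdomain but not a valid label now gets the more precise error.

    validation.IsDNS1123Label("foo.bar") // ["must not contain dots"]
    validation.IsDNS1123Label("foo_bar") // [RegexError(...)] as before
    validation.IsDNS1123Label("foo-bar") // [] (valid label)
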
502
vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
generated
vendored
Normal file
502
vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
generated
vendored
Normal file
@ -0,0 +1,502 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wait

import (
    "context"
    "math"
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/utils/clock"
)

// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
    // The initial duration.
    Duration time.Duration
    // Duration is multiplied by factor each iteration, if factor is not zero
    // and the limits imposed by Steps and Cap have not been reached.
    // Should not be negative.
    // The jitter does not contribute to the updates to the duration parameter.
    Factor float64
    // The sleep at each iteration is the duration plus an additional
    // amount chosen uniformly at random from the interval between
    // zero and `jitter*duration`.
    Jitter float64
    // The remaining number of iterations in which the duration
    // parameter may change (but progress can be stopped earlier by
    // hitting the cap). If not positive, the duration is not
    // changed. Used for exponential backoff in combination with
    // Factor and Cap.
    Steps int
    // A limit on revised values of the duration parameter. If a
    // multiplication by the factor parameter would make the duration
    // exceed the cap then the duration is set to the cap and the
    // steps parameter is set to zero.
    Cap time.Duration
}

// Step returns an amount of time to sleep determined by the original
// Duration and Jitter. The backoff is mutated to update its Steps and
// Duration. A nil Backoff always has a zero-duration step.
func (b *Backoff) Step() time.Duration {
    if b == nil {
        return 0
    }
    var nextDuration time.Duration
    nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter)
    return nextDuration
}
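
Editor's sketch making the Step arithmetic concrete, with Jitter left at zero:

    b := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 3}
    b.Step() // 100ms; Duration becomes 200ms, Steps becomes 2
    b.Step() // 200ms; Duration becomes 400ms, Steps becomes 1
    b.Step() // 400ms; Duration becomes 800ms, Steps becomes 0
    b.Step() // 800ms from here on: with Steps exhausted the duration stops changing
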

// DelayFunc returns a function that will compute the next interval to
// wait given the arguments in b. It does not mutate the original backoff
// but the function is safe to use only from a single goroutine.
func (b Backoff) DelayFunc() DelayFunc {
    steps := b.Steps
    duration := b.Duration
    cap := b.Cap
    factor := b.Factor
    jitter := b.Jitter

    return func() time.Duration {
        var nextDuration time.Duration
        // jitter is applied per step and is not cumulative over multiple steps
        nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter)
        return nextDuration
    }
}

// Timer returns a timer implementation appropriate to this backoff's parameters
// for use with wait functions.
func (b Backoff) Timer() Timer {
    if b.Steps > 1 || b.Jitter != 0 {
        return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()}
    }
    if b.Duration > 0 {
        return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration}
    }
    return newNoopTimer()
}

// delay implements the core delay algorithm used in this package.
func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) {
    // when steps is non-positive, do not alter the base duration
    if steps < 1 {
        if jitter > 0 {
            return Jitter(duration, jitter), duration, 0
        }
        return duration, duration, 0
    }
    steps--

    // calculate the next step's interval
    if factor != 0 {
        next = time.Duration(float64(duration) * factor)
        if cap > 0 && next > cap {
            next = cap
            steps = 0
        }
    } else {
        next = duration
    }

    // add jitter for this step
    if jitter > 0 {
        duration = Jitter(duration, jitter)
    }

    return duration, next, steps
}

// DelayWithReset returns a DelayFunc that will return the appropriate next interval to
// wait. Every resetInterval the backoff parameters are reset to their initial state.
// This method is safe to invoke from multiple goroutines, but all calls will advance
// the backoff state when Factor is set. If Factor is zero, this method is the same as
// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is
// zero, no backoff will be performed, the same as calling DelayFunc with a zero factor
// and steps.
func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc {
    if b.Factor <= 0 {
        return b.DelayFunc()
    }
    if resetInterval <= 0 {
        b.Steps = 0
        b.Factor = 0
        return b.DelayFunc()
    }
    return (&backoffManager{
        backoff:        b,
        initialBackoff: b,
        resetInterval:  resetInterval,

        clock:     c,
        lastStart: c.Now(),
        timer:     nil,
    }).Step
}
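
Editor's sketch (assumed usage): a retry delay that grows exponentially but snaps back to its initial parameters after a quiet minute.

    delayFn := wait.Backoff{
        Duration: 50 * time.Millisecond,
        Factor:   2.0,
        Jitter:   0.1,
        Steps:    math.MaxInt32, // keep growing until Cap takes over
        Cap:      5 * time.Second,
    }.DelayWithReset(clock.RealClock{}, time.Minute)
    time.Sleep(delayFn()) // each call advances the schedule; a minute without calls resets it
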

// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
    JitterUntil(f, period, 0.0, true, stopCh)
}

// UntilWithContext loops until context is done, running f every period.
//
// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor and with sliding = true (which means the timer
// for period starts after the f completes).
func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
    JitterUntilWithContext(ctx, f, period, 0.0, true)
}

// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
    JitterUntil(f, period, 0.0, false, stopCh)
}

// NonSlidingUntilWithContext loops until context is done, running f every
// period.
//
// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor, with sliding = false (meaning the timer for period
// starts at the same time as the function starts).
func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
    JitterUntilWithContext(ctx, f, period, 0.0, false)
}

// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
    BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}
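
Editor's sketch of the canonical use; `syncOnce` is a hypothetical worker function:

    stopCh := make(chan struct{})
    go wait.Until(syncOnce, time.Second, stopCh) // run syncOnce every second
    // ... during shutdown:
    close(stopCh)
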

// BackoffUntil loops until stop channel is closed, running f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
    var t clock.Timer
    for {
        select {
        case <-stopCh:
            return
        default:
        }

        if !sliding {
            t = backoff.Backoff()
        }

        func() {
            defer runtime.HandleCrash()
            f()
        }()

        if sliding {
            t = backoff.Backoff()
        }

        // NOTE: b/c there is no priority selection in golang
        // it is possible for this to race, meaning we could
        // trigger t.C and stopCh, and t.C select falls through.
        // In order to mitigate we re-check stopCh at the beginning
        // of every loop to prevent extra executions of f().
        select {
        case <-stopCh:
            if !t.Stop() {
                <-t.C()
            }
            return
        case <-t.C():
        }
    }
}

// JitterUntilWithContext loops until context is done, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Cancel context to stop. f may not be invoked if context is already expired.
func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
    JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
}

// backoffManager provides simple backoff behavior in a threadsafe manner to a caller.
type backoffManager struct {
    backoff        Backoff
    initialBackoff Backoff
    resetInterval  time.Duration

    clock clock.Clock

    lock      sync.Mutex
    lastStart time.Time
    timer     clock.Timer
}

// Step returns the expected next duration to wait.
func (b *backoffManager) Step() time.Duration {
    b.lock.Lock()
    defer b.lock.Unlock()

    switch {
    case b.resetInterval == 0:
        b.backoff = b.initialBackoff
    case b.clock.Now().Sub(b.lastStart) > b.resetInterval:
        b.backoff = b.initialBackoff
        b.lastStart = b.clock.Now()
    }
    return b.backoff.Step()
}

// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer
// for exponential backoff. The returned timer must be drained before calling Backoff() the second
// time.
func (b *backoffManager) Backoff() clock.Timer {
    b.lock.Lock()
    defer b.lock.Unlock()
    if b.timer == nil {
        b.timer = b.clock.NewTimer(b.Step())
    } else {
        b.timer.Reset(b.Step())
    }
    return b.timer
}

// Timer returns a new Timer instance that shares the clock and the reset behavior with all other
// timers.
func (b *backoffManager) Timer() Timer {
    return DelayFunc(b.Step).Timer(b.clock)
}

// BackoffManager manages backoff with a particular scheme based on its underlying implementation.
type BackoffManager interface {
    // Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not
    // safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff
    // until Timer.C() drains. If the second Backoff() is called before the timer from the first
    // Backoff() call finishes, the first timer will NOT be drained and result in undetermined
    // behavior.
    Backoff() clock.Timer
}

// Deprecated: Will be removed when the legacy polling functions are removed.
type exponentialBackoffManagerImpl struct {
    backoff              *Backoff
    backoffTimer         clock.Timer
    lastBackoffStart     time.Time
    initialBackoff       time.Duration
    backoffResetDuration time.Duration
    clock                clock.Clock
}

// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset.
// This backoff manager is used to reduce load during upstream unhealthiness.
//
// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then
// invoke Timer() when calling wait.BackoffUntil.
//
// Instead of:
//
//     bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock)
//     ...
//     wait.BackoffUntil(..., bm.Backoff, ...)
//
// Use:
//
//     delayFn := wait.Backoff{
//         Duration: init,
//         Cap:      max,
//         Steps:    int(math.Ceil(float64(max) / float64(init))), // now a required argument
//         Factor:   factor,
//         Jitter:   jitter,
//     }.DelayWithReset(clock, reset)
//     wait.BackoffUntil(..., delayFn.Timer(), ...)
func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
    return &exponentialBackoffManagerImpl{
        backoff: &Backoff{
            Duration: initBackoff,
            Factor:   backoffFactor,
            Jitter:   jitter,

            // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
            // what we ideally need here, we set it to max int and assume we will never use up the steps
            Steps: math.MaxInt32,
            Cap:   maxBackoff,
        },
        backoffTimer:         nil,
        initialBackoff:       initBackoff,
        lastBackoffStart:     c.Now(),
        backoffResetDuration: resetDuration,
        clock:                c,
    }
}

func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
    if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
        b.backoff.Steps = math.MaxInt32
        b.backoff.Duration = b.initialBackoff
    }
    b.lastBackoffStart = b.clock.Now()
    return b.backoff.Step()
}

// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
// The returned timer must be drained before calling Backoff() the second time.
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
    if b.backoffTimer == nil {
        b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
    } else {
        b.backoffTimer.Reset(b.getNextBackoff())
    }
    return b.backoffTimer
}

// Deprecated: Will be removed when the legacy polling functions are removed.
type jitteredBackoffManagerImpl struct {
    clock        clock.Clock
    duration     time.Duration
    jitter       float64
    backoffTimer clock.Timer
}

// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the given
// jitter. If the jitter is negative, backoff will not be jittered.
//
// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
// Backoff struct and invoke Timer() when calling wait.BackoffUntil.
//
// Instead of:
//
//     bm := wait.NewJitteredBackoffManager(duration, jitter, clock)
//     ...
//     wait.BackoffUntil(..., bm.Backoff, ...)
//
// Use:
//
//     wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...)
func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
    return &jitteredBackoffManagerImpl{
        clock:        c,
        duration:     duration,
        jitter:       jitter,
        backoffTimer: nil,
    }
}

func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
    jitteredPeriod := j.duration
    if j.jitter > 0.0 {
        jitteredPeriod = Jitter(j.duration, j.jitter)
    }
    return jitteredPeriod
}

// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
// The returned timer must be drained before calling Backoff() the second time.
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
    backoff := j.getNextBackoff()
    if j.backoffTimer == nil {
        j.backoffTimer = j.clock.NewTimer(backoff)
    } else {
        j.backoffTimer.Reset(backoff)
    }
    return j.backoffTimer
}

// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
//  1. the condition check returns true or an error,
//  2. `backoff.Steps` checks of the condition have been done, or
//  3. a sleep truncated by the cap on duration has been completed.
//
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
//
// Since backoffs are often subject to cancellation, we recommend using
// ExponentialBackoffWithContext and passing a context to the method.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
    for backoff.Steps > 0 {
        if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
            return err
        }
        if backoff.Steps == 1 {
            break
        }
        time.Sleep(backoff.Step())
    }
    return ErrWaitTimeout
}
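
Editor's sketch of the retry pattern this enables; the condition here fakes success on the third attempt:

    attempts := 0
    err := wait.ExponentialBackoff(
        wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Steps: 5},
        func() (bool, error) {
            attempts++
            return attempts >= 3, nil // done on the third try
        },
    )
    // err is nil here; had all 5 steps failed, err would be wait.ErrWaitTimeout
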

// ExponentialBackoffWithContext repeats a condition check with exponential backoff.
// It immediately returns an error if the condition returns an error, the context is cancelled
// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout).
// If an error is returned by the condition the backoff stops immediately. The condition will
// never be invoked more than backoff.Steps times.
func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error {
    for backoff.Steps > 0 {
        select {
        case <-ctx.Done():
            return ctx.Err()
        default:
        }

        if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok {
            return err
        }

        if backoff.Steps == 1 {
            break
        }

        waitBeforeRetry := backoff.Step()
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(waitBeforeRetry):
        }
    }

    return ErrWaitTimeout
}
51
vendor/k8s.io/apimachinery/pkg/util/wait/delay.go
generated
vendored
Normal file
51
vendor/k8s.io/apimachinery/pkg/util/wait/delay.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wait

import (
    "context"
    "sync"
    "time"

    "k8s.io/utils/clock"
)

// DelayFunc returns the next time interval to wait.
type DelayFunc func() time.Duration

// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes.
// Use Backoff{...}.Timer() for simple delays and more efficient timers.
func (fn DelayFunc) Timer(c clock.Clock) Timer {
    return &variableTimer{fn: fn, new: c.NewTimer}
}

// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This
// offers all of the functionality of the methods in this package.
func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error {
    return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition)
}

// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that
// wish to share a single delay timer.
func (fn DelayFunc) Concurrent() DelayFunc {
    var lock sync.Mutex
    return func() time.Duration {
        lock.Lock()
        defer lock.Unlock()
        return fn()
    }
}
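
Editor's sketch of sharing one schedule across goroutines (assumed usage):

    shared := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 8}.DelayFunc().Concurrent()
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            time.Sleep(shared()) // every call, from any goroutine, advances the same backoff
        }()
    }
    wg.Wait()
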
96
vendor/k8s.io/apimachinery/pkg/util/wait/error.go
generated
vendored
Normal file
96
vendor/k8s.io/apimachinery/pkg/util/wait/error.go
generated
vendored
Normal file
@ -0,0 +1,96 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wait

import (
    "context"
    "errors"
)

// ErrWaitTimeout is returned when the condition was not satisfied in time.
//
// Deprecated: This type will be made private in favor of Interrupted()
// for checking errors or ErrorInterrupted(err) for returning a wrapped error.
var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition"))

// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or
// Until loop exited for any reason besides the condition returning true or an
// error. A loop is considered interrupted if the calling context is cancelled,
// the context reaches its deadline, or a backoff reaches its maximum allowed
// steps.
//
// Callers should use this method instead of comparing the error value directly to
// ErrWaitTimeout, as methods that cancel a context may not return that error.
//
// Instead of:
//
//     err := wait.Poll(...)
//     if err == wait.ErrWaitTimeout {
//         log.Infof("Wait for operation exceeded")
//     } else ...
//
// Use:
//
//     err := wait.Poll(...)
//     if wait.Interrupted(err) {
//         log.Infof("Wait for operation exceeded")
//     } else ...
func Interrupted(err error) bool {
    switch {
    case errors.Is(err, errWaitTimeout),
        errors.Is(err, context.Canceled),
        errors.Is(err, context.DeadlineExceeded):
        return true
    default:
        return false
    }
}

// errInterrupted
type errInterrupted struct {
    cause error
}

// ErrorInterrupted returns an error that indicates the wait was ended
// early for a given reason. If no cause is provided a generic error
// will be used but callers are encouraged to provide a real cause for
// clarity in debugging.
func ErrorInterrupted(cause error) error {
    switch cause.(type) {
    case errInterrupted:
        // no need to wrap twice since errInterrupted is only needed
        // once in a chain
        return cause
    default:
        return errInterrupted{cause}
    }
}

// errWaitTimeout is the private version of the previous ErrWaitTimeout
// and is private to prevent direct comparison. Use ErrorInterrupted(err)
// to get an error that will return true for Interrupted(err).
var errWaitTimeout = errInterrupted{}

func (e errInterrupted) Unwrap() error        { return e.cause }
func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout }
func (e errInterrupted) Error() string {
    if e.cause == nil {
        // returns the same error message as historical behavior
        return "timed out waiting for the condition"
    }
    return e.cause.Error()
}
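
Editor's sketch of the recommended error check; `ctx` and `checkDone` are hypothetical:

    err := wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, false, checkDone)
    switch {
    case err == nil:
        // the condition reported done
    case wait.Interrupted(err):
        // cancellation, deadline, or exhausted backoff steps, not a condition failure
    default:
        // an error returned by checkDone itself
    }
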
86
vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
generated
vendored
Normal file
86
vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wait

import (
    "context"
    "time"

    "k8s.io/apimachinery/pkg/util/runtime"
)

// loopConditionUntilContext executes the provided condition at intervals defined by
// the provided timer until the provided context is cancelled, the condition returns
// true, or the condition returns an error. If sliding is true, the period is computed
// after condition runs. If it is false then period includes the runtime for condition.
// If immediate is false the first delay happens before any call to condition. The
// returned error is the error returned by the last condition or the context error if
// the context was terminated.
//
// This is the common loop construct for all polling in the wait package.
func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error {
    defer t.Stop()

    var timeCh <-chan time.Time
    doneCh := ctx.Done()

    // if we haven't requested immediate execution, delay once
    if !immediate {
        timeCh = t.C()
        select {
        case <-doneCh:
            return ctx.Err()
        case <-timeCh:
        }
    }

    for {
        // checking ctx.Err() is slightly faster than checking a select
        if err := ctx.Err(); err != nil {
            return err
        }

        if !sliding {
            t.Next()
        }
        if ok, err := func() (bool, error) {
            defer runtime.HandleCrash()
            return condition(ctx)
        }(); err != nil || ok {
            return err
        }
        if sliding {
            t.Next()
        }

        if timeCh == nil {
            timeCh = t.C()
        }

        // NOTE: b/c there is no priority selection in golang
        // it is possible for this to race, meaning we could
        // trigger t.C and doneCh, and t.C select falls through.
        // In order to mitigate we re-check doneCh at the beginning
        // of every loop to guarantee at-most one extra execution
        // of condition.
        select {
        case <-doneCh:
            return ctx.Err()
        case <-timeCh:
        }
    }
}
315
vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
generated
vendored
Normal file
315
vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wait

import (
    "context"
    "time"
)

// PollUntilContextCancel tries a condition func until it returns true, an error, or the context
// is cancelled or hits a deadline. condition will be invoked after the first interval if the
// context is not cancelled first. The returned error will be from ctx.Err(), the condition's
// err return value, or nil. If invoking condition takes longer than interval the next condition
// will be invoked immediately. When using very short intervals, condition may be invoked multiple
// times before a context cancellation is detected. If immediate is true, condition will be
// invoked before waiting and guarantees that condition is invoked at least once, regardless of
// whether the context has been cancelled.
func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error {
    return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition)
}

// PollUntilContextTimeout will terminate polling after timeout duration by setting a context
// timeout. This is provided as a convenience function for callers not currently executing under
// a deadline and is equivalent to:
//
//     deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
//     err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition)
//
// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying
// inline usage. All other behavior is identical to PollUntilContextCancel.
func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error {
    deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
    defer deadlineCancel()
    return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition)
}
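
Editor's sketch (assumed usage) of the new context-first polling entry point:

    attempts := 0
    err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 10*time.Second, true,
        func(ctx context.Context) (bool, error) {
            attempts++
            return attempts >= 3, nil // pretend readiness on the third check
        })
    // Had the condition never returned true, err would wrap context.DeadlineExceeded
    // once the 10s budget ran out, not the old ErrWaitTimeout.
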

// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
    return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
}

// PollWithContext tries a condition func until it returns true, an error,
// or when the context expires or the timeout is reached, whichever
// happens first.
//
// PollWithContext always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
    return poll(ctx, false, poller(interval, timeout), condition)
}

// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
    return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext())
}

// PollUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollUntilWithContext always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
    return poll(ctx, false, poller(interval, 0), condition)
}

// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
    return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
}

// PollInfiniteWithContext tries a condition func until it returns true or an error
//
// PollInfiniteWithContext always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
    return poll(ctx, false, poller(interval, 0), condition)
}

// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
    return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
}

// PollImmediateWithContext tries a condition func until it returns true, an error,
// or the timeout is reached or the specified context expires, whichever happens first.
//
// PollImmediateWithContext always checks 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
//
// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
    return poll(ctx, true, poller(interval, timeout), condition)
}

// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
    return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext())
}

// PollImmediateUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
    return poll(ctx, true, poller(interval, 0), condition)
}

// PollImmediateInfinite tries a condition func until it returns true or an error
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
    return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
}

// PollImmediateInfiniteWithContext tries a condition func until it returns true
// or an error or the specified context gets cancelled or expired.
//
// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
    return poll(ctx, true, poller(interval, 0), condition)
}

// Internally used, each of the public 'Poll*' functions defined in this
// package should invoke this internal function with appropriate parameters.
// ctx: the context specified by the caller, for infinite polling pass
// a context that never gets cancelled or expired.
// immediate: if true, the 'condition' will be invoked before waiting for the interval,
// in this case 'condition' will always be invoked at least once.
// wait: user specified WaitFunc function that controls at what interval the condition
// function should be invoked periodically and whether it is bound by a timeout.
// condition: user specified ConditionWithContextFunc function.
//
// Deprecated: will be removed in favor of loopConditionUntilContext.
func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error {
    if immediate {
        done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
        if err != nil {
            return err
        }
        if done {
            return nil
        }
    }

    select {
    case <-ctx.Done():
        // returning ctx.Err() will break backward compatibility, use new PollUntilContext*
        // methods instead
        return ErrWaitTimeout
    default:
        return waitForWithContext(ctx, wait, condition)
    }
}

// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as infinity, and in such a case
// it would be the caller's responsibility to close the done channel.
// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
//
// Deprecated: Will be removed in a future release.
func poller(interval, timeout time.Duration) waitWithContextFunc {
    return waitWithContextFunc(func(ctx context.Context) <-chan struct{} {
        ch := make(chan struct{})

        go func() {
            defer close(ch)

            tick := time.NewTicker(interval)
            defer tick.Stop()

            var after <-chan time.Time
            if timeout != 0 {
                // time.After is more convenient, but it
                // potentially leaves timers around much longer
                // than necessary if we exit early.
                timer := time.NewTimer(timeout)
                after = timer.C
                defer timer.Stop()
            }

            for {
                select {
                case <-tick.C:
                    // If the consumer isn't ready for this signal drop it and
                    // check the other channels.
                    select {
                    case ch <- struct{}{}:
                    default:
                    }
                case <-after:
                    return
                case <-ctx.Done():
                    return
                }
            }
        }()

        return ch
    })
}
121
vendor/k8s.io/apimachinery/pkg/util/wait/timer.go
generated
vendored
Normal file
121
vendor/k8s.io/apimachinery/pkg/util/wait/timer.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package wait

import (
    "time"

    "k8s.io/utils/clock"
)

// Timer abstracts how wait functions interact with time runtime efficiently. Test
// code may implement this interface directly but package consumers are encouraged
// to use the Backoff type as the primary mechanism for acquiring a Timer. The
// interface is a simplification of clock.Timer to prevent misuse. Timers are not
// expected to be safe for calls from multiple goroutines.
type Timer interface {
    // C returns a channel that will receive a struct{} each time the timer fires.
    // The channel should not be waited on after Stop() is invoked. It is allowed
    // to cache the returned value of C() for the lifetime of the Timer.
    C() <-chan time.Time
    // Next is invoked by wait functions to signal timers that the next interval
    // should begin. You may only use Next() if you have drained the channel C().
    // You should not call Next() after Stop() is invoked.
    Next()
    // Stop releases the timer. It is safe to invoke if no other methods have been
    // called.
    Stop()
}
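
Editor's sketch of the intended fire/drain/Next contract, obtaining the Timer from a Backoff as the doc recommends:

    t := wait.Backoff{Duration: 200 * time.Millisecond, Factor: 2.0, Steps: 4}.Timer()
    defer t.Stop()
    for i := 0; i < 3; i++ {
        <-t.C()  // wait for the current interval to fire (drains the channel)
        t.Next() // only legal after draining C(): arm the next, longer interval
    }
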
|
||||
|
||||
type noopTimer struct {
|
||||
closedCh <-chan time.Time
|
||||
}
|
||||
|
||||
// newNoopTimer creates a timer with a unique channel to avoid contention
|
||||
// for the channel's lock across multiple unrelated timers.
|
||||
func newNoopTimer() noopTimer {
|
||||
ch := make(chan time.Time)
|
||||
close(ch)
|
||||
return noopTimer{closedCh: ch}
|
||||
}
|
||||
|
||||
func (t noopTimer) C() <-chan time.Time {
|
||||
return t.closedCh
|
||||
}
|
||||
func (noopTimer) Next() {}
|
||||
func (noopTimer) Stop() {}
|
||||
|
||||
type variableTimer struct {
|
||||
fn DelayFunc
|
||||
t clock.Timer
|
||||
new func(time.Duration) clock.Timer
|
||||
}
|
||||
|
||||
func (t *variableTimer) C() <-chan time.Time {
|
||||
if t.t == nil {
|
||||
d := t.fn()
|
||||
t.t = t.new(d)
|
||||
}
|
||||
return t.t.C()
|
||||
}
|
||||
func (t *variableTimer) Next() {
|
||||
if t.t == nil {
|
||||
return
|
||||
}
|
||||
d := t.fn()
|
||||
t.t.Reset(d)
|
||||
}
|
||||
func (t *variableTimer) Stop() {
|
||||
if t.t == nil {
|
||||
return
|
||||
}
|
||||
t.t.Stop()
|
||||
t.t = nil
|
||||
}
|
||||
|
||||
type fixedTimer struct {
|
||||
interval time.Duration
|
||||
t clock.Ticker
|
||||
new func(time.Duration) clock.Ticker
|
||||
}
|
||||
|
||||
func (t *fixedTimer) C() <-chan time.Time {
|
||||
if t.t == nil {
|
||||
t.t = t.new(t.interval)
|
||||
}
|
||||
return t.t.C()
|
||||
}
|
||||
func (t *fixedTimer) Next() {
|
||||
// no-op for fixed timers
|
||||
}
|
||||
func (t *fixedTimer) Stop() {
|
||||
if t.t == nil {
|
||||
return
|
||||
}
|
||||
t.t.Stop()
|
||||
t.t = nil
|
||||
}
|
||||
|
||||
var (
|
||||
// RealTimer can be passed to methods that need a clock.Timer.
|
||||
RealTimer = clock.RealClock{}.NewTimer
|
||||
)
|
||||
|
||||
var (
|
||||
// internalClock is used for test injection of clocks
|
||||
internalClock = clock.RealClock{}
|
||||
)
|
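To make the Timer contract above concrete, here is a minimal standalone sketch (not part of the vendored code; the demoTimer name is hypothetical) that satisfies the same C()/Next()/Stop() shape with a plain time.Timer:

```go
package main

import (
	"fmt"
	"time"
)

// demoTimer is a toy Timer implementation backed by time.Timer. It follows
// the contract above: C() may be cached, and Next() is only called after
// the channel has been drained.
type demoTimer struct {
	interval time.Duration
	t        *time.Timer
}

func (d *demoTimer) C() <-chan time.Time {
	if d.t == nil {
		d.t = time.NewTimer(d.interval) // created lazily, like variableTimer
	}
	return d.t.C
}

func (d *demoTimer) Next() { d.t.Reset(d.interval) }

func (d *demoTimer) Stop() {
	if d.t != nil {
		d.t.Stop()
	}
}

func main() {
	t := &demoTimer{interval: 50 * time.Millisecond}
	defer t.Stop()
	for i := 0; i < 3; i++ {
		<-t.C() // drain before signalling the next interval
		fmt.Println("tick", i)
		t.Next()
	}
}
```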
650
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
generated
vendored
650
vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
generated
vendored
@ -18,14 +18,11 @@ package wait

import (
	"context"
	"errors"
	"math"
	"math/rand"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/utils/clock"
)

// For any test of the style:
@ -83,113 +80,6 @@ func Forever(f func(), period time.Duration) {
	Until(f, period, NeverStop)
}

// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
	JitterUntil(f, period, 0.0, true, stopCh)
}

// UntilWithContext loops until context is done, running f every period.
//
// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor and with sliding = true (which means the timer
// for period starts after the f completes).
func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
	JitterUntilWithContext(ctx, f, period, 0.0, true)
}

// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
	JitterUntil(f, period, 0.0, false, stopCh)
}

// NonSlidingUntilWithContext loops until context is done, running f every
// period.
//
// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
// with zero jitter factor, with sliding = false (meaning the timer for period
// starts at the same time as the function starts).
func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
	JitterUntilWithContext(ctx, f, period, 0.0, false)
}

// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
	BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
}

// BackoffUntil loops until stop channel is closed, running f every duration given by BackoffManager.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
	var t clock.Timer
	for {
		select {
		case <-stopCh:
			return
		default:
		}

		if !sliding {
			t = backoff.Backoff()
		}

		func() {
			defer runtime.HandleCrash()
			f()
		}()

		if sliding {
			t = backoff.Backoff()
		}

		// NOTE: b/c there is no priority selection in golang
		// it is possible for this to race, meaning we could
		// trigger t.C and stopCh, and t.C select falls through.
		// In order to mitigate we re-check stopCh at the beginning
		// of every loop to prevent extra executions of f().
		select {
		case <-stopCh:
			if !t.Stop() {
				<-t.C()
			}
			return
		case <-t.C():
		}
	}
}

// JitterUntilWithContext loops until context is done, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Cancel context to stop. f may not be invoked if context is already expired.
func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
	JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
}
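As a usage sketch of the Until family shown in this hunk (these helpers remain exported from the wait package), assuming only the documented sliding semantics:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(350 * time.Millisecond)
		close(stop)
	}()
	// With sliding = true (Until), each 100ms period is measured from the
	// end of the previous invocation of f.
	wait.Until(func() { fmt.Println("tick") }, 100*time.Millisecond, stop)
}
```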

// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
@ -203,9 +93,6 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
	return wait
}

// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")

// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
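For reference, a minimal sketch of Jitter, which draws a duration uniformly from [duration, duration + maxFactor*duration):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// With maxFactor 0.5, each call lands between 100ms and 150ms.
	fmt.Println(wait.Jitter(100*time.Millisecond, 0.5))
}
```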
@ -223,425 +110,80 @@ func (cf ConditionFunc) WithContext() ConditionWithContextFunc {
	}
}

// runConditionWithCrashProtection runs a ConditionFunc with crash protection
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
	return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext())
// ContextForChannel provides a context that will be treated as cancelled
// when the provided parentCh is closed. The implementation returns
// context.Canceled for Err() if and only if the parentCh is closed.
func ContextForChannel(parentCh <-chan struct{}) context.Context {
	return channelContext{stopCh: parentCh}
}

// runConditionWithCrashProtectionWithContext runs a
// ConditionWithContextFunc with crash protection.
var _ context.Context = channelContext{}

// channelContext will behave as if the context were cancelled when stopCh is
// closed.
type channelContext struct {
	stopCh <-chan struct{}
}

func (c channelContext) Done() <-chan struct{} { return c.stopCh }
func (c channelContext) Err() error {
	select {
	case <-c.stopCh:
		return context.Canceled
	default:
		return nil
	}
}
func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (c channelContext) Value(key any) any           { return nil }
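A small sketch of the new cancel-func-free ContextForChannel introduced in this hunk; per the comment above, Err() reports context.Canceled exactly when the channel is closed:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	ctx := wait.ContextForChannel(stopCh) // new signature: no CancelFunc

	fmt.Println(ctx.Err()) // <nil> while stopCh is open
	close(stopCh)
	fmt.Println(ctx.Err()) // context.Canceled once stopCh is closed
}
```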

// runConditionWithCrashProtection runs a ConditionFunc with crash protection.
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
	defer runtime.HandleCrash()
	return condition()
}

// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc
// with crash protection.
//
// Deprecated: Will be removed when the legacy polling methods are removed.
func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
	defer runtime.HandleCrash()
	return condition(ctx)
}

// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
	// The initial duration.
	Duration time.Duration
	// Duration is multiplied by factor each iteration, if factor is not zero
	// and the limits imposed by Steps and Cap have not been reached.
	// Should not be negative.
	// The jitter does not contribute to the updates to the duration parameter.
	Factor float64
	// The sleep at each iteration is the duration plus an additional
	// amount chosen uniformly at random from the interval between
	// zero and `jitter*duration`.
	Jitter float64
	// The remaining number of iterations in which the duration
	// parameter may change (but progress can be stopped earlier by
	// hitting the cap). If not positive, the duration is not
	// changed. Used for exponential backoff in combination with
	// Factor and Cap.
	Steps int
	// A limit on revised values of the duration parameter. If a
	// multiplication by the factor parameter would make the duration
	// exceed the cap then the duration is set to the cap and the
	// steps parameter is set to zero.
	Cap time.Duration
}

// Step (1) returns an amount of time to sleep determined by the
// original Duration and Jitter and (2) mutates the provided Backoff
// to update its Steps and Duration.
func (b *Backoff) Step() time.Duration {
	if b.Steps < 1 {
		if b.Jitter > 0 {
			return Jitter(b.Duration, b.Jitter)
		}
		return b.Duration
	}
	b.Steps--

	duration := b.Duration

	// calculate the next step
	if b.Factor != 0 {
		b.Duration = time.Duration(float64(b.Duration) * b.Factor)
		if b.Cap > 0 && b.Duration > b.Cap {
			b.Duration = b.Cap
			b.Steps = 0
		}
	}

	if b.Jitter > 0 {
		duration = Jitter(duration, b.Jitter)
	}
	return duration
}
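The Step() mutation is easiest to see numerically; a minimal sketch using only the exported Backoff fields above (jitter disabled so the output is deterministic):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	b := wait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2.0,
		Steps:    4,
		Cap:      time.Second,
	}
	// Prints 100ms, 200ms, 400ms, 800ms; after the cap is hit Steps drops
	// to zero and every further call returns the capped 1s duration.
	for i := 0; i < 5; i++ {
		fmt.Println(b.Step())
	}
}
```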

// ContextForChannel derives a child context from a parent channel.
//
// The derived context's Done channel is closed when the returned cancel function
// is called or when the parent channel is closed, whichever happens first.
//
// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
func ContextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())

	go func() {
		select {
		case <-parentCh:
			cancel()
		case <-ctx.Done():
		}
	}()
	return ctx, cancel
}

// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff()
// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in
// undetermined behavior.
// The BackoffManager is supposed to be called in a single-threaded environment.
type BackoffManager interface {
	Backoff() clock.Timer
}

type exponentialBackoffManagerImpl struct {
	backoff              *Backoff
	backoffTimer         clock.Timer
	lastBackoffStart     time.Time
	initialBackoff       time.Duration
	backoffResetDuration time.Duration
	clock                clock.Clock
}

// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset.
// This backoff manager is used to reduce load during upstream unhealthiness.
func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
	return &exponentialBackoffManagerImpl{
		backoff: &Backoff{
			Duration: initBackoff,
			Factor:   backoffFactor,
			Jitter:   jitter,

			// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
			// what we ideally need here, we set it to max int and assume we will never use up the steps
			Steps: math.MaxInt32,
			Cap:   maxBackoff,
		},
		backoffTimer:         nil,
		initialBackoff:       initBackoff,
		lastBackoffStart:     c.Now(),
		backoffResetDuration: resetDuration,
		clock:                c,
	}
}

func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
	if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
		b.backoff.Steps = math.MaxInt32
		b.backoff.Duration = b.initialBackoff
	}
	b.lastBackoffStart = b.clock.Now()
	return b.backoff.Step()
}

// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
// The returned timer must be drained before calling Backoff() the second time
func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
	if b.backoffTimer == nil {
		b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
	} else {
		b.backoffTimer.Reset(b.getNextBackoff())
	}
	return b.backoffTimer
}
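A usage sketch combining NewExponentialBackoffManager with BackoffUntil from earlier in this file, assuming only the signatures shown:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/clock"
)

func main() {
	stop := make(chan struct{})
	go func() {
		time.Sleep(300 * time.Millisecond)
		close(stop)
	}()

	m := wait.NewExponentialBackoffManager(
		20*time.Millisecond,  // initial backoff
		100*time.Millisecond, // cap
		time.Minute,          // reset window
		2.0,                  // factor
		0.1,                  // jitter
		clock.RealClock{},
	)
	// Delays between attempts grow roughly 20ms, 40ms, 80ms, then stay
	// near the 100ms cap until stop is closed.
	wait.BackoffUntil(func() { fmt.Println("attempt") }, m, true, stop)
}
```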

type jitteredBackoffManagerImpl struct {
	clock        clock.Clock
	duration     time.Duration
	jitter       float64
	backoffTimer clock.Timer
}

// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the given jitter. If the jitter
// is negative, backoff will not be jittered.
func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
	return &jitteredBackoffManagerImpl{
		clock:        c,
		duration:     duration,
		jitter:       jitter,
		backoffTimer: nil,
	}
}

func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
	jitteredPeriod := j.duration
	if j.jitter > 0.0 {
		jitteredPeriod = Jitter(j.duration, j.jitter)
	}
	return jitteredPeriod
}

// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
// The returned timer must be drained before calling Backoff() the second time
func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
	backoff := j.getNextBackoff()
	if j.backoffTimer == nil {
		j.backoffTimer = j.clock.NewTimer(backoff)
	} else {
		j.backoffTimer.Reset(backoff)
	}
	return j.backoffTimer
}

// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
// 1. the condition check returns true or an error,
// 2. `backoff.Steps` checks of the condition have been done, or
// 3. a sleep truncated by the cap on duration has been completed.
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
	for backoff.Steps > 0 {
		if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
			return err
		}
		if backoff.Steps == 1 {
			break
		}
		time.Sleep(backoff.Step())
	}
	return ErrWaitTimeout
}
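A minimal sketch of ExponentialBackoff as documented above: the condition is retried with growing sleeps until it reports done or Steps runs out:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	err := wait.ExponentialBackoff(
		wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Steps: 5},
		func() (bool, error) {
			attempts++
			return attempts >= 3, nil // succeed on the third check
		},
	)
	if errors.Is(err, wait.ErrWaitTimeout) {
		fmt.Println("gave up")
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```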

// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
	return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
}

// PollWithContext tries a condition func until it returns true, an error,
// or when the context expires or the timeout is reached, whichever
// happens first.
//
// PollWithContext always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
	return poll(ctx, false, poller(interval, timeout), condition)
}

// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
	ctx, cancel := ContextForChannel(stopCh)
	defer cancel()
	return PollUntilWithContext(ctx, interval, condition.WithContext())
}

// PollUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollUntilWithContext always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
	return poll(ctx, false, poller(interval, 0), condition)
}
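A sketch of PollUntilWithContext; note (per the internal poll function below) that when the context expires the legacy ErrWaitTimeout is returned rather than ctx.Err():

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()

	// Waits one interval before the first check, then polls until the
	// condition is true or the context is done.
	err := wait.PollUntilWithContext(ctx, 30*time.Millisecond, func(ctx context.Context) (bool, error) {
		return false, nil // never succeeds here
	})
	fmt.Println(err) // "timed out waiting for the condition"
}
```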

// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
	return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
}

// PollInfiniteWithContext tries a condition func until it returns true or an error
//
// PollInfiniteWithContext always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
	return poll(ctx, false, poller(interval, 0), condition)
}

// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
	return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
}

// PollImmediateWithContext tries a condition func until it returns true, an error,
// or the timeout is reached or the specified context expires, whichever happens first.
//
// PollImmediateWithContext always checks 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
	return poll(ctx, true, poller(interval, timeout), condition)
}
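The immediate variants differ only in checking the condition before the first interval; a short sketch contrasting that with plain Poll:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// PollImmediate runs the condition once at t=0, then every 20ms until
	// the 200ms timeout; Poll would sleep one interval before checking.
	err := wait.PollImmediate(20*time.Millisecond, 200*time.Millisecond, func() (bool, error) {
		return time.Since(start) > 50*time.Millisecond, nil
	})
	fmt.Println("done after", time.Since(start).Round(10*time.Millisecond), "err:", err)
}
```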

// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
	ctx, cancel := ContextForChannel(stopCh)
	defer cancel()
	return PollImmediateUntilWithContext(ctx, interval, condition.WithContext())
}

// PollImmediateUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
	return poll(ctx, true, poller(interval, 0), condition)
}

// PollImmediateInfinite tries a condition func until it returns true or an error
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
	return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
}

// PollImmediateInfiniteWithContext tries a condition func until it returns true
// or an error or the specified context gets cancelled or expired.
//
// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
	return poll(ctx, true, poller(interval, 0), condition)
}

// Internally used, each of the public 'Poll*' functions defined in this
// package should invoke this internal function with appropriate parameters.
// ctx: the context specified by the caller, for infinite polling pass
// a context that never gets cancelled or expired.
// immediate: if true, the 'condition' will be invoked before waiting for the interval,
// in this case 'condition' will always be invoked at least once.
// wait: user specified WaitFunc function that controls at what interval the condition
// function should be invoked periodically and whether it is bound by a timeout.
// condition: user specified ConditionWithContextFunc function.
func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error {
	if immediate {
		done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}

	select {
	case <-ctx.Done():
		// returning ctx.Err() will break backward compatibility
		return ErrWaitTimeout
	default:
		return WaitForWithContext(ctx, wait, condition)
	}
}

// WaitFunc creates a channel that receives an item every time a test
// waitFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
//
// Deprecated: Will be removed in a future release in favor of
// loopConditionUntilContext.
type waitFunc func(done <-chan struct{}) <-chan struct{}

// WithContext converts the WaitFunc to an equivalent WaitWithContextFunc
func (w WaitFunc) WithContext() WaitWithContextFunc {
func (w waitFunc) WithContext() waitWithContextFunc {
	return func(ctx context.Context) <-chan struct{} {
		return w(ctx.Done())
	}
}

// WaitWithContextFunc creates a channel that receives an item every time a test
// waitWithContextFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
//
// When the specified context gets cancelled or expires the function
// stops sending items and returns immediately.
type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
//
// Deprecated: Will be removed in a future release in favor of
// loopConditionUntilContext.
type waitWithContextFunc func(ctx context.Context) <-chan struct{}

// WaitFor continually checks 'fn' as driven by 'wait'.
// waitForWithContext continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed. If the channel is closed
// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout.
//
// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever
// returning true.
//
// When the done channel is closed, because the golang `select` statement is
// "uniform pseudo-random", the `fn` might still run one or multiple times,
// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
	ctx, cancel := ContextForChannel(done)
	defer cancel()
	return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext())
}

// WaitForWithContext continually checks 'fn' as driven by 'wait'.
//
// WaitForWithContext gets a channel from 'wait()', and then invokes 'fn'
// waitForWithContext gets a channel from 'wait()', and then invokes 'fn'
// once for every value placed on the channel and once more when the
// channel is closed. If the channel is closed and 'fn'
// returns false without error, WaitForWithContext returns ErrWaitTimeout.
// returns false without error, waitForWithContext returns ErrWaitTimeout.
//
// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
@ -651,8 +193,11 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
//
// When the ctx.Done() channel is closed, because the golang `select` statement is
// "uniform pseudo-random", the `fn` might still run one or multiple times,
// though eventually `WaitForWithContext` will return.
func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error {
// though eventually `waitForWithContext` will return.
//
// Deprecated: Will be removed in a future release in favor of
// loopConditionUntilContext.
func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error {
	waitCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c := wait(waitCtx)
@ -670,88 +215,9 @@ func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn Condit
				return ErrWaitTimeout
			}
		case <-ctx.Done():
			// returning ctx.Err() will break backward compatibility
			// returning ctx.Err() will break backward compatibility, use new PollUntilContext*
			// methods instead
			return ErrWaitTimeout
		}
	}
}

// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity, and in such a case
// it would be the caller's responsibility to close the done channel.
// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitWithContextFunc {
	return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} {
		ch := make(chan struct{})

		go func() {
			defer close(ch)

			tick := time.NewTicker(interval)
			defer tick.Stop()

			var after <-chan time.Time
			if timeout != 0 {
				// time.After is more convenient, but it
				// potentially leaves timers around much longer
				// than necessary if we exit early.
				timer := time.NewTimer(timeout)
				after = timer.C
				defer timer.Stop()
			}

			for {
				select {
				case <-tick.C:
					// If the consumer isn't ready for this signal drop it and
					// check the other channels.
					select {
					case ch <- struct{}{}:
					default:
					}
				case <-after:
					return
				case <-ctx.Done():
					return
				}
			}
		}()

		return ch
	})
}

// ExponentialBackoffWithContext works with a request context and a Backoff. It ensures that the retry wait never
// exceeds the deadline specified by the request context.
func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionFunc) error {
	for backoff.Steps > 0 {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
			return err
		}

		if backoff.Steps == 1 {
			break
		}

		waitBeforeRetry := backoff.Step()
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(waitBeforeRetry):
		}
	}

	return ErrWaitTimeout
}
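A sketch of ExponentialBackoffWithContext matching the signature shown in this hunk (it takes a plain ConditionFunc here); the context deadline wins over the remaining Steps:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond)
	defer cancel()

	err := wait.ExponentialBackoffWithContext(ctx,
		wait.Backoff{Duration: 50 * time.Millisecond, Factor: 2.0, Steps: 10},
		func() (bool, error) {
			return false, nil // never succeeds; the deadline stops the loop
		},
	)
	fmt.Println(err) // context.DeadlineExceeded once the deadline passes
}
```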
134
vendor/k8s.io/apimachinery/pkg/util/waitgroup/ratelimited_waitgroup.go
generated
vendored
Normal file
134
vendor/k8s.io/apimachinery/pkg/util/waitgroup/ratelimited_waitgroup.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package waitgroup

import (
	"context"
	"fmt"
	"sync"
)

// RateLimiter abstracts the rate limiter used by RateLimitedSafeWaitGroup.
// The implementation must be thread-safe.
type RateLimiter interface {
	Wait(ctx context.Context) error
}

// RateLimiterFactoryFunc is used by the RateLimitedSafeWaitGroup to create a new
// instance of a RateLimiter that will be used to rate limit the return rate
// of the active number of request(s). 'count' is the number of requests in
// flight that are expected to invoke 'Done' on this wait group.
type RateLimiterFactoryFunc func(count int) (RateLimiter, context.Context, context.CancelFunc)

// RateLimitedSafeWaitGroup must not be copied after first use.
type RateLimitedSafeWaitGroup struct {
	wg sync.WaitGroup
	// Once Wait is initiated, all consecutive Done invocations will be
	// rate limited using this rate limiter.
	limiter RateLimiter
	stopCtx context.Context

	mu sync.Mutex
	// wait indicates whether Wait has been called; if true,
	// then any Add with positive delta will return an error.
	wait bool
	// number of request(s) currently using the wait group
	count int
}

// Add adds delta, which may be negative, similar to sync.WaitGroup.
// If Add with a positive delta happens after Wait, it will return an error,
// which prevents unsafe Add.
func (wg *RateLimitedSafeWaitGroup) Add(delta int) error {
	wg.mu.Lock()
	defer wg.mu.Unlock()

	if wg.wait && delta > 0 {
		return fmt.Errorf("add with positive delta after Wait is forbidden")
	}
	wg.wg.Add(delta)
	wg.count += delta
	return nil
}

// Done decrements the WaitGroup counter, rate limiting is applied only
// when the wait group is in waiting mode.
func (wg *RateLimitedSafeWaitGroup) Done() {
	var limiter RateLimiter
	func() {
		wg.mu.Lock()
		defer wg.mu.Unlock()

		wg.count -= 1
		if wg.wait {
			// we are using the limiter outside the scope of the lock
			limiter = wg.limiter
		}
	}()

	defer wg.wg.Done()
	if limiter != nil {
		limiter.Wait(wg.stopCtx)
	}
}

// Wait blocks until the WaitGroup counter is zero or a hard limit has elapsed.
// It returns the number of active request(s) accounted for at the time Wait
// has been invoked, and the number of request(s) that have drained (done using
// the wait group immediately before Wait returns).
// Ideally, both numbers returned should be equal, to indicate that all
// request(s) using the wait group have released their lock.
func (wg *RateLimitedSafeWaitGroup) Wait(limiterFactory RateLimiterFactoryFunc) (int, int, error) {
	if limiterFactory == nil {
		return 0, 0, fmt.Errorf("rate limiter factory must be specified")
	}

	var cancel context.CancelFunc
	var countNow, countAfter int
	func() {
		wg.mu.Lock()
		defer wg.mu.Unlock()

		wg.limiter, wg.stopCtx, cancel = limiterFactory(wg.count)
		countNow = wg.count
		wg.wait = true
	}()

	defer cancel()
	// there should be a hard stop, in case request(s) are not responsive
	// enough to invoke Done before the grace period is over.
	waitDoneCh := make(chan struct{})
	go func() {
		defer close(waitDoneCh)
		wg.wg.Wait()
	}()

	var err error
	select {
	case <-wg.stopCtx.Done():
		err = wg.stopCtx.Err()
	case <-waitDoneCh:
	}

	func() {
		wg.mu.Lock()
		defer wg.mu.Unlock()

		countAfter = wg.count
	}()
	return countNow, countAfter, err
}
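A standalone sketch of RateLimitedSafeWaitGroup; the noopLimiter is a hypothetical stand-in for a real limiter such as a token bucket, and the factory supplies the hard-stop context described above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/waitgroup"
)

// noopLimiter satisfies RateLimiter without actually limiting; a real
// caller would plug in e.g. golang.org/x/time/rate here.
type noopLimiter struct{}

func (noopLimiter) Wait(ctx context.Context) error { return nil }

func main() {
	var wg waitgroup.RateLimitedSafeWaitGroup
	for i := 0; i < 3; i++ {
		if err := wg.Add(1); err != nil {
			panic(err)
		}
		go func() {
			defer wg.Done()
			time.Sleep(50 * time.Millisecond) // simulated in-flight request
		}()
	}

	before, after, err := wg.Wait(func(count int) (waitgroup.RateLimiter, context.Context, context.CancelFunc) {
		// Hard stop after one second, in case requests never call Done.
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		return noopLimiter{}, ctx, cancel
	})
	fmt.Println(before, after, err) // 3 0 <nil> when everything drains in time
}
```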