build: move e2e dependencies into e2e/go.mod

Several packages are only used while running the e2e suite. These
packages are less important to update, as they cannot influence the
final executable that is part of the Ceph-CSI container image.

By moving these dependencies out of the main Ceph-CSI go.mod, it becomes
easier to identify whether a reported CVE affects Ceph-CSI itself, or
only the testing (as is the case for most Kubernetes CVEs).
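
As a sketch (the module paths and the version below are assumed for
illustration, not taken from the actual change), the split looks like:

    // go.mod (main module): runtime dependencies only
    module github.com/ceph/ceph-csi

    // e2e/go.mod (new module): dependencies used only by the e2e suite
    module github.com/ceph/ceph-csi/e2e

    require k8s.io/kubernetes v1.32.0 // version illustrative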

Signed-off-by: Niels de Vos <ndevos@ibm.com>
Commit bec6090996 (parent 15da101b1b)
Author: Niels de Vos
Date: 2025-03-04 08:57:28 +01:00
Committed by: mergify[bot]
8047 changed files with 1407827 additions and 3453 deletions


@@ -0,0 +1,531 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package assumecache
import (
"errors"
"fmt"
"strconv"
"sync"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/api/meta"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/scheduler/util/queue"
)
// Informer is the subset of [cache.SharedInformer] that NewAssumeCache depends upon.
type Informer interface {
AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
}
// AddTestObject adds an object to the assume cache.
// Only use this for unit testing!
func AddTestObject(cache *AssumeCache, obj interface{}) {
cache.add(obj)
}
// UpdateTestObject updates an object in the assume cache.
// Only use this for unit testing!
func UpdateTestObject(cache *AssumeCache, obj interface{}) {
cache.update(nil, obj)
}
// DeleteTestObject deletes an object from the assume cache.
// Only use this for unit testing!
func DeleteTestObject(cache *AssumeCache, obj interface{}) {
cache.delete(obj)
}
// Sentinel errors that can be checked for with errors.Is.
var (
ErrWrongType = errors.New("object has wrong type")
ErrNotFound = errors.New("object not found")
ErrObjectName = errors.New("cannot determine object name")
)
type WrongTypeError struct {
TypeName string
Object interface{}
}
func (e WrongTypeError) Error() string {
return fmt.Sprintf("could not convert object to type %v: %+v", e.TypeName, e.Object)
}
func (e WrongTypeError) Is(err error) bool {
return err == ErrWrongType
}
type NotFoundError struct {
TypeName string
ObjectKey string
}
func (e NotFoundError) Error() string {
return fmt.Sprintf("could not find %v %q", e.TypeName, e.ObjectKey)
}
func (e NotFoundError) Is(err error) bool {
return err == ErrNotFound
}
type ObjectNameError struct {
DetailedErr error
}
func (e ObjectNameError) Error() string {
return fmt.Sprintf("failed to get object name: %v", e.DetailedErr)
}
func (e ObjectNameError) Is(err error) bool {
return err == ErrObjectName
}
// AssumeCache is a cache on top of the informer that allows for updating
// objects outside of informer events and also restoring the informer
// cache's version of the object. Objects are assumed to be
// Kubernetes API objects that are supported by [meta.Accessor].
//
// Objects can be referenced via their key, with [cache.MetaNamespaceKeyFunc]
// as key function.
//
// AssumeCache stores two pointers to represent a single object:
// - The pointer to the informer object.
// - The pointer to the latest object, which could be the same as
// the informer object, or an in-memory object.
//
// An informer update always overrides the latest object pointer.
//
// Assume() only updates the latest object pointer.
// Restore() sets the latest object pointer back to the informer object.
// Get/List() always returns the latest object pointer.
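//
// A typical assume/restore cycle, as a sketch (logger, informer, client,
// ctx, ns, claim, and key are assumed to exist; the object type is only
// illustrative):
//
//	c := NewAssumeCache(logger, informer, "v1.PersistentVolumeClaim", "", nil)
//	updated, err := client.CoreV1().PersistentVolumeClaims(ns).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
//	if err == nil {
//		_ = c.Assume(updated) // Get/List now return the updated object
//	}
//	...
//	c.Restore(key) // on a later failure, return to the informer's version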
type AssumeCache struct {
// The logger that was chosen when setting up the cache.
// Will be used for all operations.
logger klog.Logger
// Synchronizes updates to all fields below.
rwMutex sync.RWMutex
// All registered event handlers.
eventHandlers []cache.ResourceEventHandler
handlerRegistration cache.ResourceEventHandlerRegistration
// The eventQueue contains functions which deliver an event to one
// event handler.
//
// These functions must be invoked while *not locking* rwMutex because
// the event handlers are allowed to access the assume cache. Holding
// rwMutex then would cause a deadlock.
//
// New functions get added as part of processing a cache update while
// the rwMutex is locked. Each function which adds something to the queue
// also drains the queue before returning, therefore it is guaranteed
// that all event handlers get notified immediately (useful for unit
// testing).
//
// A channel cannot be used here because it cannot have an unbounded
// capacity. This could lead to a deadlock (writer holds rwMutex,
// gets blocked because capacity is exhausted, reader is in a handler
// which tries to lock the rwMutex). Writing into such a channel
// while not holding the rwMutex doesn't work because in-order delivery
// of events would no longer be guaranteed.
eventQueue queue.FIFO[func()]
// describes the object stored
description string
// Stores objInfo pointers
store cache.Indexer
// Index function for object
indexFunc cache.IndexFunc
indexName string
}
type objInfo struct {
// name of the object
name string
// Latest version of object could be cached-only or from informer
latestObj interface{}
// Latest object from informer
apiObj interface{}
}
func objInfoKeyFunc(obj interface{}) (string, error) {
objInfo, ok := obj.(*objInfo)
if !ok {
return "", &WrongTypeError{TypeName: "objInfo", Object: obj}
}
return objInfo.name, nil
}
func (c *AssumeCache) objInfoIndexFunc(obj interface{}) ([]string, error) {
objInfo, ok := obj.(*objInfo)
if !ok {
return []string{""}, &WrongTypeError{TypeName: "objInfo", Object: obj}
}
return c.indexFunc(objInfo.latestObj)
}
// NewAssumeCache creates an assume cache for general objects.
func NewAssumeCache(logger klog.Logger, informer Informer, description, indexName string, indexFunc cache.IndexFunc) *AssumeCache {
c := &AssumeCache{
logger: logger,
description: description,
indexFunc: indexFunc,
indexName: indexName,
}
indexers := cache.Indexers{}
if indexName != "" && indexFunc != nil {
indexers[indexName] = c.objInfoIndexFunc
}
c.store = cache.NewIndexer(objInfoKeyFunc, indexers)
// Unit tests don't use informers
if informer != nil {
// Cannot fail in practice, so the returned error is intentionally ignored.
c.handlerRegistration, _ = informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: c.add,
UpdateFunc: c.update,
DeleteFunc: c.delete,
},
)
}
return c
}
func (c *AssumeCache) add(obj interface{}) {
if obj == nil {
return
}
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
c.logger.Error(&ObjectNameError{err}, "Add failed")
return
}
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
var oldObj interface{}
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
c.logger.Error(err, "Add failed: couldn't get object version")
return
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
c.logger.Error(err, "Add failed: couldn't get stored object version")
return
}
// Only update object if version is newer.
// This is so we don't override assumed objects due to informer resync.
if newVersion <= storedVersion {
c.logger.V(10).Info("Skip adding object to assume cache because version is not newer than storedVersion", "description", c.description, "cacheKey", name, "newVersion", newVersion, "storedVersion", storedVersion)
return
}
oldObj = objInfo.latestObj
}
objInfo := &objInfo{name: name, latestObj: obj, apiObj: obj}
if err = c.store.Update(objInfo); err != nil {
c.logger.Info("Error occurred while updating stored object", "err", err)
} else {
c.logger.V(10).Info("Adding object to assume cache", "description", c.description, "cacheKey", name, "assumeCache", obj)
c.pushEvent(oldObj, obj)
}
}
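// update implements the informer's UpdateFunc. The old object is ignored:
// add looks up the currently stored object itself and only keeps the newer
// version.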
func (c *AssumeCache) update(oldObj interface{}, newObj interface{}) {
c.add(newObj)
}
func (c *AssumeCache) delete(obj interface{}) {
if obj == nil {
return
}
name, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err != nil {
c.logger.Error(&ObjectNameError{err}, "Failed to delete")
return
}
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
var oldObj interface{}
if len(c.eventHandlers) > 0 {
if objInfo, _ := c.getObjInfo(name); objInfo != nil {
oldObj = objInfo.latestObj
}
}
objInfo := &objInfo{name: name}
err = c.store.Delete(objInfo)
if err != nil {
c.logger.Error(err, "Failed to delete", "description", c.description, "cacheKey", name)
}
c.pushEvent(oldObj, nil)
}
// pushEvent gets called while the mutex is locked for writing.
// It ensures that all currently registered event handlers get
// notified about a change when the caller starts delivering
// those with emitEvents.
//
// For a delete event, newObj is nil. For an add, oldObj is nil.
// An update has both as non-nil.
func (c *AssumeCache) pushEvent(oldObj, newObj interface{}) {
for _, handler := range c.eventHandlers {
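// Copy the loop variable so each queued closure captures its own
// handler (defensive; required under pre-Go-1.22 loop semantics).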
handler := handler
if oldObj == nil {
c.eventQueue.Push(func() {
handler.OnAdd(newObj, false)
})
} else if newObj == nil {
c.eventQueue.Push(func() {
handler.OnDelete(oldObj)
})
} else {
c.eventQueue.Push(func() {
handler.OnUpdate(oldObj, newObj)
})
}
}
}
func (c *AssumeCache) getObjVersion(name string, obj interface{}) (int64, error) {
objAccessor, err := meta.Accessor(obj)
if err != nil {
return -1, err
}
objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
if err != nil {
//nolint:errorlint // Intentionally not wrapping the error, the underlying error is an implementation detail.
return -1, fmt.Errorf("error parsing ResourceVersion %q for %v %q: %v", objAccessor.GetResourceVersion(), c.description, name, err)
}
return objResourceVersion, nil
}
func (c *AssumeCache) getObjInfo(key string) (*objInfo, error) {
obj, ok, err := c.store.GetByKey(key)
if err != nil {
return nil, err
}
if !ok {
return nil, &NotFoundError{TypeName: c.description, ObjectKey: key}
}
objInfo, ok := obj.(*objInfo)
if !ok {
return nil, &WrongTypeError{"objInfo", obj}
}
return objInfo, nil
}
// Get the object by its key.
func (c *AssumeCache) Get(key string) (interface{}, error) {
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
objInfo, err := c.getObjInfo(key)
if err != nil {
return nil, err
}
return objInfo.latestObj, nil
}
// GetAPIObj gets the informer cache's version by its key.
func (c *AssumeCache) GetAPIObj(key string) (interface{}, error) {
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
objInfo, err := c.getObjInfo(key)
if err != nil {
return nil, err
}
return objInfo.apiObj, nil
}
// List all the objects in the cache.
func (c *AssumeCache) List(indexObj interface{}) []interface{} {
c.rwMutex.RLock()
defer c.rwMutex.RUnlock()
return c.listLocked(indexObj)
}
func (c *AssumeCache) listLocked(indexObj interface{}) []interface{} {
allObjs := []interface{}{}
var objs []interface{}
if c.indexName != "" {
o, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
c.logger.Error(err, "List index error")
return nil
}
objs = o
} else {
objs = c.store.List()
}
for _, obj := range objs {
objInfo, ok := obj.(*objInfo)
if !ok {
c.logger.Error(&WrongTypeError{TypeName: "objInfo", Object: obj}, "List error")
continue
}
allObjs = append(allObjs, objInfo.latestObj)
}
return allObjs
}
// Assume updates the object in-memory only.
//
// The version of the object must be greater than or equal to
// the current object, otherwise an error is returned.
//
// Storing an object with the same version is supported
// by the assume cache, but suffers from a race: if an
// update is received via the informer while such an
// object is assumed, it gets dropped in favor of the
// newer object from the apiserver.
//
// Only assuming objects that were returned by an apiserver
// operation (Update, Patch) is safe.
func (c *AssumeCache) Assume(obj interface{}) error {
name, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
return &ObjectNameError{err}
}
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(name)
if err != nil {
return err
}
newVersion, err := c.getObjVersion(name, obj)
if err != nil {
return err
}
storedVersion, err := c.getObjVersion(name, objInfo.latestObj)
if err != nil {
return err
}
if newVersion < storedVersion {
return fmt.Errorf("%v %q is out of sync (stored: %d, assume: %d)", c.description, name, storedVersion, newVersion)
}
c.pushEvent(objInfo.latestObj, obj)
// Only update the cached object
objInfo.latestObj = obj
c.logger.V(4).Info("Assumed object", "description", c.description, "cacheKey", name, "version", newVersion)
return nil
}
// Restore the informer cache's version of the object.
func (c *AssumeCache) Restore(objName string) {
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
objInfo, err := c.getObjInfo(objName)
if err != nil {
// This could be expected if the object got deleted
c.logger.V(5).Info("Restore object", "description", c.description, "cacheKey", objName, "err", err)
} else {
if objInfo.latestObj != objInfo.apiObj {
c.pushEvent(objInfo.latestObj, objInfo.apiObj)
objInfo.latestObj = objInfo.apiObj
}
c.logger.V(4).Info("Restored object", "description", c.description, "cacheKey", objName)
}
}
// AddEventHandler adds an event handler to the cache. Events to a
// single handler are delivered sequentially, but there is no
// coordination between different handlers. A handler may use the
// cache.
//
// The return value can be used to wait for cache synchronization.
func (c *AssumeCache) AddEventHandler(handler cache.ResourceEventHandler) cache.ResourceEventHandlerRegistration {
defer c.emitEvents()
c.rwMutex.Lock()
defer c.rwMutex.Unlock()
c.eventHandlers = append(c.eventHandlers, handler)
allObjs := c.listLocked(nil)
for _, obj := range allObjs {
c.eventQueue.Push(func() {
handler.OnAdd(obj, true)
})
}
if c.handlerRegistration == nil {
// No informer, so immediately synced.
return syncedHandlerRegistration{}
}
return c.handlerRegistration
}
// emitEvents delivers all pending events that are in the queue, in the order
// in which they were stored there (FIFO).
func (c *AssumeCache) emitEvents() {
for {
c.rwMutex.Lock()
deliver, ok := c.eventQueue.Pop()
c.rwMutex.Unlock()
if !ok {
return
}
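// Deliver the event while not holding the lock: the handler is
// allowed to call back into the assume cache.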
func() {
defer utilruntime.HandleCrash()
deliver()
}()
}
}
// syncedHandlerRegistration is an implementation of ResourceEventHandlerRegistration
// whose HasSynced method always returns true.
type syncedHandlerRegistration struct{}
func (syncedHandlerRegistration) HasSynced() bool { return true }


@@ -0,0 +1,32 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// For each of these resources, a container that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
// of computing priority only. This ensures that when scheduling zero-request pods, such
// pods will not all be scheduled to the node with the smallest in-use request,
// and that when scheduling regular pods, such pods will not see zero-request pods as
// consuming no resources whatsoever. We chose these values to be similar to the
// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
// As described in #11713, we use request instead of limit to deal with resource requirements.
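// For example, a container that declares no requests at all is scored as if
// it had requested 100m of CPU and 200MB of memory.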
const (
// DefaultMilliCPURequest defines the default milli-CPU request.
DefaultMilliCPURequest int64 = 100 // 0.1 core
// DefaultMemoryRequest defines the default memory request size.
DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
)


@@ -0,0 +1,110 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package queue
const (
// normalSize limits the size of the buffer that is kept
// for reuse.
normalSize = 4
)
// FIFO implements a first-in-first-out queue with unbounded size.
// The zero value is a valid, empty queue.
//
// Access must be protected by the caller when used concurrently by
// different goroutines; the queue itself implements no locking.
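//
// A minimal usage sketch:
//
//	var q FIFO[int]
//	q.Push(1)
//	q.Push(2)
//	v, ok := q.Pop() // v == 1, ok == true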
type FIFO[T any] struct {
// elements contains a buffer for elements which have been
// pushed and not popped yet. Two scenarios are possible:
// - one chunk in the middle (start <= end)
// - one chunk at the end, followed by one chunk at the
// beginning (end <= start)
//
// start == end can be either an empty queue or a completely
// full one (with two chunks).
elements []T
// len counts the number of elements which have been pushed and
// not popped yet.
len int
// start is the index of the first valid element.
start int
// end is the index after the last valid element.
end int
}
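// Len returns the number of elements currently in the queue.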
func (q *FIFO[T]) Len() int {
return q.len
}
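// Push adds one element to the end of the queue, growing the ring buffer
// if it is full.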
func (q *FIFO[T]) Push(element T) {
size := len(q.elements)
if q.len == size {
// Need larger buffer.
newSize := size * 2
if newSize == 0 {
newSize = normalSize
}
elements := make([]T, newSize)
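// Copy the one or two chunks into the new buffer so the queue's
// contents are linear again, starting at index 0.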
if q.start == 0 {
copy(elements, q.elements)
} else {
copy(elements, q.elements[q.start:])
copy(elements[len(q.elements)-q.start:], q.elements[0:q.end])
}
q.start = 0
q.end = q.len
q.elements = elements
size = newSize
}
if q.end == size {
// Wrap around.
q.elements[0] = element
q.end = 1
q.len++
return
}
q.elements[q.end] = element
q.end++
q.len++
}
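// Pop removes and returns the oldest element; ok is false if the queue
// is empty.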
func (q *FIFO[T]) Pop() (element T, ok bool) {
if q.len == 0 {
return
}
element = q.elements[q.start]
q.start++
if q.start == len(q.elements) {
// Wrap around.
q.start = 0
}
q.len--
// Once it is empty, shrink down to avoid hanging onto
// a large buffer forever.
if q.len == 0 && len(q.elements) > normalSize {
q.elements = make([]T, normalSize)
q.start = 0
q.end = 0
}
ok = true
return
}


@@ -0,0 +1,190 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
"encoding/json"
"fmt"
"time"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/retry"
corev1helpers "k8s.io/component-helpers/scheduling/corev1"
"k8s.io/klog/v2"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
// GetPodFullName returns a name that uniquely identifies a pod.
func GetPodFullName(pod *v1.Pod) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format).
return pod.Name + "_" + pod.Namespace
}
// GetPodStartTime returns the start time of the given pod, or the current
// timestamp if it hasn't started yet.
func GetPodStartTime(pod *v1.Pod) *metav1.Time {
if pod.Status.StartTime != nil {
return pod.Status.StartTime
}
// Assumed pods and bound pods that haven't started don't have a StartTime yet.
return &metav1.Time{Time: time.Now()}
}
// GetEarliestPodStartTime returns the earliest start time of all pods that
// have the highest priority among all victims.
func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {
if len(victims.Pods) == 0 {
// should not reach here.
klog.Background().Error(nil, "victims.Pods is empty. Should not reach here")
return nil
}
earliestPodStartTime := GetPodStartTime(victims.Pods[0])
maxPriority := corev1helpers.PodPriority(victims.Pods[0])
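// Track the highest priority seen so far and the earliest start time
// among the victims at that priority.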
for _, pod := range victims.Pods {
if podPriority := corev1helpers.PodPriority(pod); podPriority == maxPriority {
if podStartTime := GetPodStartTime(pod); podStartTime.Before(earliestPodStartTime) {
earliestPodStartTime = podStartTime
}
} else if podPriority > maxPriority {
maxPriority = podPriority
earliestPodStartTime = GetPodStartTime(pod)
}
}
return earliestPodStartTime
}
// MoreImportantPod returns true when the priority of the first pod is higher
// than that of the second one. If the two pods' priorities are equal, the pod
// with the earlier StartTime is considered more important.
func MoreImportantPod(pod1, pod2 *v1.Pod) bool {
p1 := corev1helpers.PodPriority(pod1)
p2 := corev1helpers.PodPriority(pod2)
if p1 != p2 {
return p1 > p2
}
return GetPodStartTime(pod1).Before(GetPodStartTime(pod2))
}
// Retriable reports whether an error that occurred during a scheduling cycle
// is retriable.
func Retriable(err error) bool {
return apierrors.IsInternalError(err) || apierrors.IsServiceUnavailable(err) ||
net.IsConnectionRefused(err)
}
// PatchPodStatus calculates the delta bytes change from <old.Status> to <newStatus>,
// and then submits a patch request to the API server to apply the pod changes.
func PatchPodStatus(ctx context.Context, cs kubernetes.Interface, old *v1.Pod, newStatus *v1.PodStatus) error {
if newStatus == nil {
return nil
}
oldData, err := json.Marshal(v1.Pod{Status: old.Status})
if err != nil {
return err
}
newData, err := json.Marshal(v1.Pod{Status: *newStatus})
if err != nil {
return err
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
if err != nil {
return fmt.Errorf("failed to create merge patch for pod %q/%q: %v", old.Namespace, old.Name, err)
}
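// An empty patch ("{}") means the status did not change; skip the API request.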
if "{}" == string(patchBytes) {
return nil
}
patchFn := func() error {
_, err := cs.CoreV1().Pods(old.Namespace).Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
return err
}
return retry.OnError(retry.DefaultBackoff, Retriable, patchFn)
}
// DeletePod deletes the given <pod> from the API server.
func DeletePod(ctx context.Context, cs kubernetes.Interface, pod *v1.Pod) error {
return cs.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
}
// ClearNominatedNodeName internally submits a patch request to the API server
// to set each pod's Status.NominatedNodeName to "".
func ClearNominatedNodeName(ctx context.Context, cs kubernetes.Interface, pods ...*v1.Pod) utilerrors.Aggregate {
var errs []error
for _, p := range pods {
if len(p.Status.NominatedNodeName) == 0 {
continue
}
podStatusCopy := p.Status.DeepCopy()
podStatusCopy.NominatedNodeName = ""
if err := PatchPodStatus(ctx, cs, p, podStatusCopy); err != nil {
errs = append(errs, err)
}
}
return utilerrors.NewAggregate(errs)
}
// IsScalarResourceName reports whether the resource is an Extended, Hugepages,
// prefixed Native, or AttachableVolume resource.
func IsScalarResourceName(name v1.ResourceName) bool {
return v1helper.IsExtendedResourceName(name) || v1helper.IsHugePageResourceName(name) ||
v1helper.IsPrefixedNativeResource(name) || v1helper.IsAttachableVolumeResourceName(name)
}
// As converts two objects to the given type.
// Both objects must be of the same type. If not, an error is returned.
// nil objects are allowed and will be converted to nil.
// For oldObj, cache.DeletedFinalStateUnknown is handled and the
// object stored in it will be converted instead.
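//
// A typical call site in an informer event handler, as a sketch:
//
//	oldPod, newPod, err := As[*v1.Pod](oldObj, newObj)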
func As[T any](oldObj, newobj interface{}) (T, T, error) {
var oldTyped T
var newTyped T
var ok bool
if newobj != nil {
newTyped, ok = newobj.(T)
if !ok {
return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", newTyped, newobj)
}
}
if oldObj != nil {
if realOldObj, ok := oldObj.(cache.DeletedFinalStateUnknown); ok {
oldObj = realOldObj.Obj
}
oldTyped, ok = oldObj.(T)
if !ok {
return oldTyped, newTyped, fmt.Errorf("expected %T, but got %T", oldTyped, oldObj)
}
}
return oldTyped, newTyped, nil
}