rebase: bump the k8s-dependencies group with 1 update
Bumps the k8s-dependencies group with 1 update: [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes).

Updates `k8s.io/kubernetes` from 1.29.2 to 1.29.3
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.29.2...v1.29.3)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: k8s-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Niels de Vos <ndevos@ibm.com>

Commit 5b9730ce6e (parent b9543d3fd3), committed by mergify[bot].

vendor/k8s.io/apiserver/pkg/features/kube_features.go (generated, vendored; 9 changed lines)

@@ -163,6 +163,13 @@ const (
     // Deprecates and removes SelfLink from ObjectMeta and ListMeta.
     RemoveSelfLink featuregate.Feature = "RemoveSelfLink"
 
+    // owner: @serathius
+    // beta: v1.30
+    //
+    // Allow watch cache to create a watch on a dedicated RPC.
+    // This prevents watch cache from being starved by other watches.
+    SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC"
+
     // owner: @apelisse, @lavalamp
     // alpha: v1.14
     // beta: v1.16
@@ -303,6 +310,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
     RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
 
+    SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},
+
     ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
 
     ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29

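For context, the FeatureSpec entries above use the k8s.io/component-base/featuregate API. A minimal stand-alone sketch of registering and querying such a gate (the local constant and the hard-coded override are illustrative, not how the apiserver actually wires it up):

package main

import (
    "fmt"

    "k8s.io/component-base/featuregate"
)

// Local copy of the feature name for illustration; the real constant lives in
// k8s.io/apiserver/pkg/features as shown in the diff above.
const SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC"

func main() {
    gate := featuregate.NewFeatureGate()
    // Register the gate with the same default/pre-release the diff sets.
    if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},
    }); err != nil {
        panic(err)
    }

    fmt.Println(gate.Enabled(SeparateCacheWatchRPC)) // true (default)

    // A Beta gate can still be flipped, e.g. --feature-gates=SeparateCacheWatchRPC=false.
    if err := gate.Set("SeparateCacheWatchRPC=false"); err != nil {
        panic(err)
    }
    fmt.Println(gate.Enabled(SeparateCacheWatchRPC)) // false
}
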
vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go (generated, vendored; 13 changed lines)

@@ -25,6 +25,7 @@ import (
     "time"
 
     "go.opentelemetry.io/otel/attribute"
+    "google.golang.org/grpc/metadata"
 
     "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/meta"
@@ -397,10 +398,18 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
         // so that future reuse does not get a spurious timeout.
         <-cacher.timer.C
     }
-    progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock)
+    var contextMetadata metadata.MD
+    if utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC) {
+        // Add grpc context metadata to watch and progress notify requests done by cacher to:
+        // * Prevent starvation of watch opened by cacher, by moving it to separate Watch RPC than watch request that bypass cacher.
+        // * Ensure that progress notification requests are executed on the same Watch RPC as their watch, which is required for it to work.
+        contextMetadata = metadata.New(map[string]string{"source": "cache"})
+    }
+
+    progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock, contextMetadata)
     watchCache := newWatchCache(
         config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester)
-    listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
+    listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc, contextMetadata)
     reflectorName := "storage/cacher.go:" + config.ResourcePrefix
 
     reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)

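The change threads gRPC metadata into the contexts the cacher uses for its watch and progress-notify requests. A minimal sketch of that pattern (the separateCacheWatchRPC boolean stands in for the feature-gate check above):

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/metadata"
)

func main() {
    // Stand-in for the utilfeature.DefaultFeatureGate.Enabled(...) check.
    separateCacheWatchRPC := true

    var contextMetadata metadata.MD
    if separateCacheWatchRPC {
        contextMetadata = metadata.New(map[string]string{"source": "cache"})
    }

    ctx := context.Background()
    if contextMetadata != nil {
        ctx = metadata.NewOutgoingContext(ctx, contextMetadata)
    }

    // The etcd client keys its shared Watch RPC streams on outgoing metadata,
    // so requests carrying "source: cache" land on a stream of their own.
    if md, ok := metadata.FromOutgoingContext(ctx); ok {
        fmt.Println(md.Get("source")) // [cache]
    }
}
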
vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go (generated, vendored; 30 changed lines)

@@ -19,6 +19,8 @@ package cacher
 import (
     "context"
 
+    "google.golang.org/grpc/metadata"
+
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/labels"
@@ -30,17 +32,19 @@ import (
 
 // listerWatcher opaques storage.Interface to expose cache.ListerWatcher.
 type listerWatcher struct {
-    storage        storage.Interface
-    resourcePrefix string
-    newListFunc    func() runtime.Object
+    storage         storage.Interface
+    resourcePrefix  string
+    newListFunc     func() runtime.Object
+    contextMetadata metadata.MD
 }
 
 // NewListerWatcher returns a storage.Interface backed ListerWatcher.
-func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
+func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object, contextMetadata metadata.MD) cache.ListerWatcher {
     return &listerWatcher{
-        storage:        storage,
-        resourcePrefix: resourcePrefix,
-        newListFunc:    newListFunc,
+        storage:         storage,
+        resourcePrefix:  resourcePrefix,
+        newListFunc:     newListFunc,
+        contextMetadata: contextMetadata,
     }
 }
 
@@ -59,7 +63,11 @@ func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error
         Predicate: pred,
         Recursive: true,
     }
-    if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil {
+    ctx := context.Background()
+    if lw.contextMetadata != nil {
+        ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata)
+    }
+    if err := lw.storage.GetList(ctx, lw.resourcePrefix, storageOpts, list); err != nil {
         return nil, err
     }
     return list, nil
@@ -73,5 +81,9 @@ func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, err
         Recursive:      true,
         ProgressNotify: true,
     }
-    return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts)
+    ctx := context.Background()
+    if lw.contextMetadata != nil {
+        ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata)
+    }
+    return lw.storage.Watch(ctx, lw.resourcePrefix, opts)
 }

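listerWatcher implements client-go's cache.ListerWatcher so the reflector created in cacher.go can drive it. A self-contained sketch of that contract using a closure-based ListWatch and a fake watcher (illustrative only; the vendored type is backed by storage.Interface instead of fakes):

package main

import (
    "fmt"
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
)

func main() {
    fakeWatcher := watch.NewFake()

    // Closure-based ListerWatcher standing in for the storage-backed one.
    lw := &cache.ListWatch{
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
            return &v1.PodList{ListMeta: metav1.ListMeta{ResourceVersion: "1"}}, nil
        },
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            return fakeWatcher, nil
        },
    }

    store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    reflector := cache.NewNamedReflector("example", lw, &v1.Pod{}, store, 0)

    stopCh := make(chan struct{})
    go reflector.Run(stopCh)

    // Blocks until the reflector has started watching and consumes the event.
    fakeWatcher.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", ResourceVersion: "2"}})
    time.Sleep(100 * time.Millisecond)
    fmt.Println(store.ListKeys()) // typically [demo]
    close(stopCh)
}
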
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go (generated, vendored; 9 changed lines)

@@ -21,6 +21,8 @@ import (
     "sync"
     "time"
 
+    "google.golang.org/grpc/metadata"
+
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
 
@@ -34,10 +36,11 @@ const (
     progressRequestPeriod = 100 * time.Millisecond
 )
 
-func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester {
+func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory, contextMetadata metadata.MD) *conditionalProgressRequester {
     pr := &conditionalProgressRequester{
         clock:                clock,
         requestWatchProgress: requestWatchProgress,
+        contextMetadata:      contextMetadata,
     }
     pr.cond = sync.NewCond(pr.mux.RLocker())
     return pr
@@ -54,6 +57,7 @@ type TickerFactory interface {
 type conditionalProgressRequester struct {
     clock                TickerFactory
     requestWatchProgress WatchProgressRequester
+    contextMetadata      metadata.MD
 
     mux  sync.RWMutex
     cond *sync.Cond
@@ -63,6 +67,9 @@ type conditionalProgressRequester struct {
 
 func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) {
     ctx := wait.ContextForChannel(stopCh)
+    if pr.contextMetadata != nil {
+        ctx = metadata.NewOutgoingContext(ctx, pr.contextMetadata)
+    }
     go func() {
         defer utilruntime.HandleCrash()
         <-stopCh

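Run derives its context from the stop channel via wait.ContextForChannel and, with this change, tags it with the same metadata as the cacher's watch. A small sketch of that pattern (values are illustrative):

package main

import (
    "fmt"

    "google.golang.org/grpc/metadata"
    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    stopCh := make(chan struct{})

    // Context is canceled when stopCh closes; tag it like the progress requester does.
    ctx := wait.ContextForChannel(stopCh)
    ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"source": "cache"}))

    close(stopCh)
    <-ctx.Done()
    fmt.Println(ctx.Err()) // context canceled
}
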
vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go (generated, vendored; 42 changed lines)

@@ -5141,6 +5141,46 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS
     return allErrs
 }
 
+// ValidateInitContainerStateTransition test to if any illegal init container state transitions are being attempted
+func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList {
+    allErrs := field.ErrorList{}
+    // If we should always restart, containers are allowed to leave the terminated state
+    if podSpec.RestartPolicy == core.RestartPolicyAlways {
+        return allErrs
+    }
+    for i, oldStatus := range oldStatuses {
+        // Skip any container that is not terminated
+        if oldStatus.State.Terminated == nil {
+            continue
+        }
+        // Skip any container that failed but is allowed to restart
+        if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure {
+            continue
+        }
+
+        // Skip any restartable init container that is allowed to restart
+        isRestartableInitContainer := false
+        for _, c := range podSpec.InitContainers {
+            if oldStatus.Name == c.Name {
+                if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways {
+                    isRestartableInitContainer = true
+                }
+                break
+            }
+        }
+        if isRestartableInitContainer {
+            continue
+        }
+
+        for _, newStatus := range newStatuses {
+            if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
+                allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
+            }
+        }
+    }
+    return allErrs
+}
+
 // ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation.
 func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
     fldPath := field.NewPath("metadata")
@@ -5162,7 +5202,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions
     // If pod should not restart, make sure the status update does not transition
     // any terminated containers to a non-terminated state.
     allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...)
-    allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
+    allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...)
     // The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever.
    allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...)
     allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...)

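The new helper lets a restartable (sidecar-style) init container leave the terminated state without the status update being rejected. A hedged sketch of calling it directly (the pod spec and statuses below are made up for illustration):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/core/validation"
)

func main() {
    always := core.ContainerRestartPolicyAlways
    spec := &core.PodSpec{
        RestartPolicy: core.RestartPolicyNever,
        InitContainers: []core.Container{
            // Restartable ("sidecar") init container.
            {Name: "sidecar", RestartPolicy: &always},
        },
    }

    // Old status: the sidecar had terminated. New status: it is running again.
    oldStatuses := []core.ContainerStatus{{
        Name:  "sidecar",
        State: core.ContainerState{Terminated: &core.ContainerStateTerminated{ExitCode: 0}},
    }}
    newStatuses := []core.ContainerStatus{{
        Name:  "sidecar",
        State: core.ContainerState{Running: &core.ContainerStateRunning{}},
    }}

    errs := validation.ValidateInitContainerStateTransition(
        newStatuses, oldStatuses, field.NewPath("status", "initContainerStatuses"), spec)
    fmt.Println(len(errs)) // 0: the transition is allowed for restartable init containers
}
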
vendor/k8s.io/kubernetes/pkg/features/kube_features.go (generated, vendored; 2 changed lines)

@@ -1263,6 +1263,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
     genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},
 
+    genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},
+
     genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
 
     genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29

vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go (generated, vendored; 17 changed lines)

@@ -102,6 +102,23 @@ func IsFailedPreconditionError(err error) bool {
     return errors.As(err, &failedPreconditionError)
 }
 
+type OperationNotSupported struct {
+    msg string
+}
+
+func (err *OperationNotSupported) Error() string {
+    return err.msg
+}
+
+func NewOperationNotSupportedError(msg string) *OperationNotSupported {
+    return &OperationNotSupported{msg: msg}
+}
+
+func IsOperationNotSupportedError(err error) bool {
+    var operationNotSupportedError *OperationNotSupported
+    return errors.As(err, &operationNotSupportedError)
+}
+
 // TransientOperationFailure indicates operation failed with a transient error
 // and may fix itself when retried.
 type TransientOperationFailure struct {

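Because IsOperationNotSupportedError goes through errors.As, callers can wrap the error and still detect it. A small sketch (the error message is illustrative):

package main

import (
    "errors"
    "fmt"

    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

func main() {
    // Hypothetical failure surfaced by a volume operation.
    base := volumetypes.NewOperationNotSupportedError("node expansion not supported for this volume")
    wrapped := fmt.Errorf("resize failed: %w", base)

    // IsOperationNotSupportedError uses errors.As, so wrapping is transparent.
    fmt.Println(volumetypes.IsOperationNotSupportedError(wrapped)) // true
    fmt.Println(errors.Is(wrapped, base))                          // true
}
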
vendor/k8s.io/kubernetes/test/utils/image/manifest.go (generated, vendored; 6 changed lines)

@@ -232,7 +232,7 @@ const (
 
 func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
     configs := map[ImageID]Config{}
-    configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"}
+    configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"}
     configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
     configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
     configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@@ -241,8 +241,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
     configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
     configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
     configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
-    configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.5"}
-    configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"}
+    configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.6"}
+    configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.12-0"}
     configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
     configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
     configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}

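These entries feed the e2e test image helpers; the package also exposes helpers such as GetE2EImage to resolve a full reference. A one-line sketch (the exact registry depends on the configured RegistryList):

package main

import (
    "fmt"

    imageutils "k8s.io/kubernetes/test/utils/image"
)

func main() {
    // After this bump the agnhost entry resolves to tag 2.47; the registry
    // portion comes from the active RegistryList.
    fmt.Println(imageutils.GetE2EImage(imageutils.Agnhost))
}
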
vendor/modules.txt (vendored; 84 changed lines)

@@ -942,7 +942,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.29.2 => k8s.io/api v0.29.2
+# k8s.io/api v0.29.3 => k8s.io/api v0.29.3
 ## explicit; go 1.21
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -998,12 +998,12 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.29.0 => k8s.io/apiextensions-apiserver v0.29.2
+# k8s.io/apiextensions-apiserver v0.29.0 => k8s.io/apiextensions-apiserver v0.29.3
 ## explicit; go 1.21
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/features
-# k8s.io/apimachinery v0.29.2 => k8s.io/apimachinery v0.29.2
+# k8s.io/apimachinery v0.29.3 => k8s.io/apimachinery v0.29.3
 ## explicit; go 1.21
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1066,7 +1066,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.29.2 => k8s.io/apiserver v0.29.2
+# k8s.io/apiserver v0.29.3 => k8s.io/apiserver v0.29.3
 ## explicit; go 1.21
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/cel
@@ -1214,7 +1214,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate
 k8s.io/apiserver/plugin/pkg/audit/webhook
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.29.2
+# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.29.3
 ## explicit; go 1.21
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -1485,7 +1485,7 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.29.2 => k8s.io/cloud-provider v0.29.2
+# k8s.io/cloud-provider v0.29.3 => k8s.io/cloud-provider v0.29.3
 ## explicit; go 1.21
 k8s.io/cloud-provider
 k8s.io/cloud-provider/app/config
@@ -1500,7 +1500,7 @@ k8s.io/cloud-provider/names
 k8s.io/cloud-provider/options
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/component-base v0.29.2 => k8s.io/component-base v0.29.2
+# k8s.io/component-base v0.29.3 => k8s.io/component-base v0.29.3
 ## explicit; go 1.21
 k8s.io/component-base/cli/flag
 k8s.io/component-base/config
@@ -1522,13 +1522,13 @@ k8s.io/component-base/metrics/testutil
 k8s.io/component-base/tracing
 k8s.io/component-base/tracing/api/v1
 k8s.io/component-base/version
-# k8s.io/component-helpers v0.29.2 => k8s.io/component-helpers v0.29.2
+# k8s.io/component-helpers v0.29.3 => k8s.io/component-helpers v0.29.3
 ## explicit; go 1.21
 k8s.io/component-helpers/node/util/sysctl
 k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/volume
-# k8s.io/controller-manager v0.29.2 => k8s.io/controller-manager v0.29.2
+# k8s.io/controller-manager v0.29.3 => k8s.io/controller-manager v0.29.3
 ## explicit; go 1.21
 k8s.io/controller-manager/config
 k8s.io/controller-manager/config/v1
@@ -1549,7 +1549,7 @@ k8s.io/klog/v2/internal/dbg
 k8s.io/klog/v2/internal/serialize
 k8s.io/klog/v2/internal/severity
 k8s.io/klog/v2/internal/sloghandler
-# k8s.io/kms v0.29.2
+# k8s.io/kms v0.29.3
 ## explicit; go 1.21
 k8s.io/kms/apis/v1beta1
 k8s.io/kms/apis/v2
@@ -1576,15 +1576,15 @@ k8s.io/kube-openapi/pkg/validation/errors
 k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
-# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.2
+# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.29.3
 ## explicit; go 1.21
 k8s.io/kubectl/pkg/scale
 k8s.io/kubectl/pkg/util/podutils
-# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.29.2
+# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.29.3
 ## explicit; go 1.21
 k8s.io/kubelet/pkg/apis
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.29.2
+# k8s.io/kubernetes v1.29.3
 ## explicit; go 1.21
 k8s.io/kubernetes/pkg/api/legacyscheme
 k8s.io/kubernetes/pkg/api/service
@@ -1649,10 +1649,10 @@ k8s.io/kubernetes/test/utils
 k8s.io/kubernetes/test/utils/format
 k8s.io/kubernetes/test/utils/image
 k8s.io/kubernetes/test/utils/kubeconfig
-# k8s.io/mount-utils v0.29.2 => k8s.io/mount-utils v0.29.2
+# k8s.io/mount-utils v0.29.3 => k8s.io/mount-utils v0.29.3
 ## explicit; go 1.21
 k8s.io/mount-utils
-# k8s.io/pod-security-admission v0.29.2 => k8s.io/pod-security-admission v0.29.2
+# k8s.io/pod-security-admission v0.29.3 => k8s.io/pod-security-admission v0.29.3
 ## explicit; go 1.21
 k8s.io/pod-security-admission/api
 k8s.io/pod-security-admission/policy
@@ -1740,31 +1740,31 @@ sigs.k8s.io/yaml/goyaml.v2
 # github.com/ceph/ceph-csi/api => ./api
 # github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3
 # gomodules.xyz/jsonpatch/v2 => github.com/gomodules/jsonpatch/v2 v2.2.0
-# k8s.io/api => k8s.io/api v0.29.2
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.2
-# k8s.io/apimachinery => k8s.io/apimachinery v0.29.2
-# k8s.io/apiserver => k8s.io/apiserver v0.29.2
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.2
-# k8s.io/client-go => k8s.io/client-go v0.29.2
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.2
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.2
-# k8s.io/code-generator => k8s.io/code-generator v0.29.2
-# k8s.io/component-base => k8s.io/component-base v0.29.2
-# k8s.io/component-helpers => k8s.io/component-helpers v0.29.2
-# k8s.io/controller-manager => k8s.io/controller-manager v0.29.2
-# k8s.io/cri-api => k8s.io/cri-api v0.29.2
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.2
-# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.2
-# k8s.io/endpointslice => k8s.io/endpointslice v0.29.2
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.2
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.2
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.2
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.2
-# k8s.io/kubectl => k8s.io/kubectl v0.29.2
-# k8s.io/kubelet => k8s.io/kubelet v0.29.2
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.2
-# k8s.io/metrics => k8s.io/metrics v0.29.2
-# k8s.io/mount-utils => k8s.io/mount-utils v0.29.2
-# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.2
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.2
+# k8s.io/api => k8s.io/api v0.29.3
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.3
+# k8s.io/apimachinery => k8s.io/apimachinery v0.29.3
+# k8s.io/apiserver => k8s.io/apiserver v0.29.3
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.3
+# k8s.io/client-go => k8s.io/client-go v0.29.3
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.3
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.3
+# k8s.io/code-generator => k8s.io/code-generator v0.29.3
+# k8s.io/component-base => k8s.io/component-base v0.29.3
+# k8s.io/component-helpers => k8s.io/component-helpers v0.29.3
+# k8s.io/controller-manager => k8s.io/controller-manager v0.29.3
+# k8s.io/cri-api => k8s.io/cri-api v0.29.3
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.3
+# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.29.3
+# k8s.io/endpointslice => k8s.io/endpointslice v0.29.3
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.3
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.3
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.3
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.3
+# k8s.io/kubectl => k8s.io/kubectl v0.29.3
+# k8s.io/kubelet => k8s.io/kubelet v0.29.3
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.3
+# k8s.io/metrics => k8s.io/metrics v0.29.3
+# k8s.io/mount-utils => k8s.io/mount-utils v0.29.3
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.3
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.3
 # layeh.com/radius => github.com/layeh/radius v0.0.0-20190322222518-890bc1058917
