mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-14 02:43:36 +00:00
rebase: update replaced k8s.io modules to v0.33.0
Signed-off-by: Niels de Vos <ndevos@ibm.com>
This commit is contained in:
committed by
mergify[bot]
parent
dd77e72800
commit
107407b44b
52
e2e/vendor/k8s.io/kubernetes/pkg/api/service/warnings.go
generated
vendored
52
e2e/vendor/k8s.io/kubernetes/pkg/api/service/warnings.go
generated
vendored
@ -18,8 +18,8 @@ package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
|
||||
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/core/helper"
|
||||
@ -37,20 +37,20 @@ func GetWarningsForService(service, oldService *api.Service) []string {
|
||||
|
||||
if helper.IsServiceIPSet(service) {
|
||||
for i, clusterIP := range service.Spec.ClusterIPs {
|
||||
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("clusterIPs").Index(i), clusterIP)...)
|
||||
warnings = append(warnings, utilvalidation.GetWarningsForIP(field.NewPath("spec").Child("clusterIPs").Index(i), clusterIP)...)
|
||||
}
|
||||
}
|
||||
|
||||
for i, externalIP := range service.Spec.ExternalIPs {
|
||||
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("externalIPs").Index(i), externalIP)...)
|
||||
warnings = append(warnings, utilvalidation.GetWarningsForIP(field.NewPath("spec").Child("externalIPs").Index(i), externalIP)...)
|
||||
}
|
||||
|
||||
if len(service.Spec.LoadBalancerIP) > 0 {
|
||||
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("loadBalancerIP"), service.Spec.LoadBalancerIP)...)
|
||||
warnings = append(warnings, utilvalidation.GetWarningsForIP(field.NewPath("spec").Child("loadBalancerIP"), service.Spec.LoadBalancerIP)...)
|
||||
}
|
||||
|
||||
for i, cidr := range service.Spec.LoadBalancerSourceRanges {
|
||||
warnings = append(warnings, getWarningsForCIDR(field.NewPath("spec").Child("loadBalancerSourceRanges").Index(i), cidr)...)
|
||||
warnings = append(warnings, utilvalidation.GetWarningsForCIDR(field.NewPath("spec").Child("loadBalancerSourceRanges").Index(i), cidr)...)
|
||||
}
|
||||
|
||||
if service.Spec.Type == api.ServiceTypeExternalName && len(service.Spec.ExternalIPs) > 0 {
|
||||
@ -62,45 +62,3 @@ func GetWarningsForService(service, oldService *api.Service) []string {
|
||||
|
||||
return warnings
|
||||
}
|
||||
|
||||
func getWarningsForIP(fieldPath *field.Path, address string) []string {
|
||||
// IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17
|
||||
// This will also warn about possible future changes on the golang std library
|
||||
// xref: https://issues.k8s.io/108074
|
||||
ip, err := netip.ParseAddr(address)
|
||||
if err != nil {
|
||||
return []string{fmt.Sprintf("%s: IP address was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)}
|
||||
}
|
||||
// A Recommendation for IPv6 Address Text Representation
|
||||
//
|
||||
// "All of the above examples represent the same IPv6 address. This
|
||||
// flexibility has caused many problems for operators, systems
|
||||
// engineers, and customers.
|
||||
// ..."
|
||||
// https://datatracker.ietf.org/doc/rfc5952/
|
||||
if ip.Is6() && ip.String() != address {
|
||||
return []string{fmt.Sprintf("%s: IPv6 address %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, address, ip.String())}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func getWarningsForCIDR(fieldPath *field.Path, cidr string) []string {
|
||||
// IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17
|
||||
// This will also warn about possible future changes on the golang std library
|
||||
// xref: https://issues.k8s.io/108074
|
||||
prefix, err := netip.ParsePrefix(cidr)
|
||||
if err != nil {
|
||||
return []string{fmt.Sprintf("%s: IP prefix was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)}
|
||||
}
|
||||
// A Recommendation for IPv6 Address Text Representation
|
||||
//
|
||||
// "All of the above examples represent the same IPv6 address. This
|
||||
// flexibility has caused many problems for operators, systems
|
||||
// engineers, and customers.
|
||||
// ..."
|
||||
// https://datatracker.ietf.org/doc/rfc5952/
|
||||
if prefix.Addr().Is6() && prefix.String() != cidr {
|
||||
return []string{fmt.Sprintf("%s: IPv6 prefix %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, cidr, prefix.String())}
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
30
e2e/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go
generated
vendored
30
e2e/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go
generated
vendored
@ -23,6 +23,8 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
// FindPort locates the container port for the given pod and portName. If the
|
||||
@ -416,3 +418,31 @@ func IsRestartableInitContainer(initContainer *v1.Container) bool {
|
||||
}
|
||||
return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
|
||||
}
|
||||
|
||||
// We will emit status.observedGeneration if the feature is enabled OR if status.observedGeneration is already set.
|
||||
// This protects against an infinite loop of kubelet trying to clear the value after the FG is turned off, and
|
||||
// the API server preserving existing values when an incoming update tries to clear it.
|
||||
func GetPodObservedGenerationIfEnabled(pod *v1.Pod) int64 {
|
||||
if pod.Status.ObservedGeneration != 0 || utilfeature.DefaultFeatureGate.Enabled(features.PodObservedGenerationTracking) {
|
||||
return pod.Generation
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// We will emit condition.observedGeneration if the feature is enabled OR if condition.observedGeneration is already set.
|
||||
// This protects against an infinite loop of kubelet trying to clear the value after the FG is turned off, and
|
||||
// the API server preserving existing values when an incoming update tries to clear it.
|
||||
func GetPodObservedGenerationIfEnabledOnCondition(podStatus *v1.PodStatus, generation int64, conditionType v1.PodConditionType) int64 {
|
||||
if podStatus == nil {
|
||||
return 0
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodObservedGenerationTracking) {
|
||||
return generation
|
||||
}
|
||||
for _, condition := range podStatus.Conditions {
|
||||
if condition.Type == conditionType && condition.ObservedGeneration != 0 {
|
||||
return generation
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package apps // import "k8s.io/kubernetes/pkg/apis/apps"
|
||||
package apps
|
||||
|
34
e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
generated
vendored
34
e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
generated
vendored
@ -198,6 +198,7 @@ type StatefulSetSpec struct {
|
||||
// the network identity of the set. Pods get DNS/hostnames that follow the
|
||||
// pattern: pod-specific-string.serviceName.default.svc.cluster.local
|
||||
// where "pod-specific-string" is managed by the StatefulSet controller.
|
||||
// +optional
|
||||
ServiceName string
|
||||
|
||||
// PodManagementPolicy controls how pods are created during initial scale up,
|
||||
@ -507,19 +508,19 @@ type DeploymentStatus struct {
|
||||
// +optional
|
||||
ObservedGeneration int64
|
||||
|
||||
// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
|
||||
// Total number of non-terminating pods targeted by this deployment (their labels match the selector).
|
||||
// +optional
|
||||
Replicas int32
|
||||
|
||||
// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
|
||||
// Total number of non-terminating pods targeted by this deployment that have the desired template spec.
|
||||
// +optional
|
||||
UpdatedReplicas int32
|
||||
|
||||
// Total number of ready pods targeted by this deployment.
|
||||
// Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
|
||||
// +optional
|
||||
ReadyReplicas int32
|
||||
|
||||
// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
|
||||
// Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
|
||||
// +optional
|
||||
AvailableReplicas int32
|
||||
|
||||
@ -529,6 +530,13 @@ type DeploymentStatus struct {
|
||||
// +optional
|
||||
UnavailableReplicas int32
|
||||
|
||||
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
|
||||
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
|
||||
//
|
||||
// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
|
||||
// +optional
|
||||
TerminatingReplicas *int32
|
||||
|
||||
// Represents the latest available observations of a deployment's current state.
|
||||
Conditions []DeploymentCondition
|
||||
|
||||
@ -865,22 +873,30 @@ type ReplicaSetSpec struct {
|
||||
|
||||
// ReplicaSetStatus represents the current status of a ReplicaSet.
|
||||
type ReplicaSetStatus struct {
|
||||
// Replicas is the number of actual replicas.
|
||||
// Replicas is the most recently observed number of non-terminating pods.
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
|
||||
Replicas int32
|
||||
|
||||
// The number of pods that have labels matching the labels of the pod template of the replicaset.
|
||||
// The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
|
||||
// +optional
|
||||
FullyLabeledReplicas int32
|
||||
|
||||
// The number of ready replicas for this replica set.
|
||||
// The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
|
||||
// +optional
|
||||
ReadyReplicas int32
|
||||
|
||||
// The number of available replicas (ready for at least minReadySeconds) for this replica set.
|
||||
// The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
|
||||
// +optional
|
||||
AvailableReplicas int32
|
||||
|
||||
// ObservedGeneration is the most recent generation observed by the controller.
|
||||
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
|
||||
// and have not yet reached the Failed or Succeeded .status.phase.
|
||||
//
|
||||
// This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
|
||||
// +optional
|
||||
TerminatingReplicas *int32
|
||||
|
||||
// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
|
||||
// +optional
|
||||
ObservedGeneration int64
|
||||
|
||||
|
10
e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
generated
vendored
10
e2e/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
generated
vendored
@ -396,6 +396,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
|
||||
*out = *in
|
||||
if in.TerminatingReplicas != nil {
|
||||
in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]DeploymentCondition, len(*in))
|
||||
@ -545,6 +550,11 @@ func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) {
|
||||
*out = *in
|
||||
if in.TerminatingReplicas != nil {
|
||||
in, out := &in.TerminatingReplicas, &out.TerminatingReplicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]ReplicaSetCondition, len(*in))
|
||||
|
8
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go
generated
vendored
8
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go
generated
vendored
@ -36,3 +36,11 @@ const DefaultCPUUtilization = 80
|
||||
// BehaviorSpecsAnnotation is the annotation which holds the HPA constraints specs
|
||||
// when converting the `Behavior` field from autoscaling/v2beta2
|
||||
const BehaviorSpecsAnnotation = "autoscaling.alpha.kubernetes.io/behavior"
|
||||
|
||||
// ToleranceScaleDownAnnotation is the annotation which holds the HPA tolerance specs
|
||||
// when converting the `ScaleDown.Tolerance` field from autoscaling/v2
|
||||
const ToleranceScaleDownAnnotation = "autoscaling.alpha.kubernetes.io/scale-down-tolerance"
|
||||
|
||||
// ToleranceScaleUpAnnotation is the annotation which holds the HPA tolerance specs
|
||||
// when converting the `ScaleUp.Tolerance` field from autoscaling/v2
|
||||
const ToleranceScaleUpAnnotation = "autoscaling.alpha.kubernetes.io/scale-up-tolerance"
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling"
|
||||
package autoscaling
|
||||
|
10
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/helpers.go
generated
vendored
10
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/helpers.go
generated
vendored
@ -16,8 +16,10 @@ limitations under the License.
|
||||
|
||||
package autoscaling
|
||||
|
||||
// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to serialize round-tripped fields from later API versions,
|
||||
// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to
|
||||
// serialize round-tripped fields from HorizontalPodAutoscaler later API versions,
|
||||
// and returns false if no changes were made and the original input object was returned.
|
||||
//
|
||||
// It should always be called when converting internal -> external versions, prior
|
||||
// to setting any of the custom annotations:
|
||||
//
|
||||
@ -34,12 +36,16 @@ package autoscaling
|
||||
func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
|
||||
_, hasMetricsSpecs := in[MetricSpecsAnnotation]
|
||||
_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]
|
||||
_, hasToleranceScaleDown := in[ToleranceScaleDownAnnotation]
|
||||
_, hasToleranceScaleUp := in[ToleranceScaleUpAnnotation]
|
||||
_, hasMetricsStatuses := in[MetricStatusesAnnotation]
|
||||
_, hasConditions := in[HorizontalPodAutoscalerConditionsAnnotation]
|
||||
if hasMetricsSpecs || hasBehaviorSpecs || hasMetricsStatuses || hasConditions {
|
||||
if hasMetricsSpecs || hasBehaviorSpecs || hasToleranceScaleDown || hasToleranceScaleUp || hasMetricsStatuses || hasConditions {
|
||||
out = DeepCopyStringMap(in)
|
||||
delete(out, MetricSpecsAnnotation)
|
||||
delete(out, BehaviorSpecsAnnotation)
|
||||
delete(out, ToleranceScaleDownAnnotation)
|
||||
delete(out, ToleranceScaleUpAnnotation)
|
||||
delete(out, MetricStatusesAnnotation)
|
||||
delete(out, HorizontalPodAutoscalerConditionsAnnotation)
|
||||
return out, true
|
||||
|
31
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
generated
vendored
31
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
generated
vendored
@ -138,12 +138,18 @@ const (
|
||||
DisabledPolicySelect ScalingPolicySelect = "Disabled"
|
||||
)
|
||||
|
||||
// HPAScalingRules configures the scaling behavior for one direction.
|
||||
// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
|
||||
// HPAScalingRules configures the scaling behavior for one direction via
|
||||
// scaling Policy Rules and a configurable metric tolerance.
|
||||
//
|
||||
// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
|
||||
// They can limit the scaling velocity by specifying scaling policies.
|
||||
// They can prevent flapping by specifying the stabilization window, so that the
|
||||
// number of replicas is not set instantly, instead, the safest value from the stabilization
|
||||
// window is chosen.
|
||||
//
|
||||
// The tolerance is applied to the metric values and prevents scaling too
|
||||
// eagerly for small metric variations. (Note that setting a tolerance requires
|
||||
// enabling the alpha HPAConfigurableTolerance feature gate.)
|
||||
type HPAScalingRules struct {
|
||||
// StabilizationWindowSeconds is the number of seconds for which past recommendations should be
|
||||
// considered while scaling up or scaling down.
|
||||
@ -157,10 +163,27 @@ type HPAScalingRules struct {
|
||||
// If not set, the default value MaxPolicySelect is used.
|
||||
// +optional
|
||||
SelectPolicy *ScalingPolicySelect
|
||||
// policies is a list of potential scaling polices which can used during scaling.
|
||||
// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
|
||||
// policies is a list of potential scaling polices which can be used during scaling.
|
||||
// If not set, use the default values:
|
||||
// - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
|
||||
// - For scale down: allow all pods to be removed in a 15s window.
|
||||
// +optional
|
||||
Policies []HPAScalingPolicy
|
||||
// tolerance is the tolerance on the ratio between the current and desired
|
||||
// metric value under which no updates are made to the desired number of
|
||||
// replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
|
||||
// set, the default cluster-wide tolerance is applied (by default 10%).
|
||||
//
|
||||
// For example, if autoscaling is configured with a memory consumption target of 100Mi,
|
||||
// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
|
||||
// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
|
||||
//
|
||||
// This is an alpha field and requires enabling the HPAConfigurableTolerance
|
||||
// feature gate.
|
||||
//
|
||||
// +featureGate=HPAConfigurableTolerance
|
||||
// +optional
|
||||
Tolerance *resource.Quantity
|
||||
}
|
||||
|
||||
// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.
|
||||
|
5
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go
generated
vendored
5
e2e/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go
generated
vendored
@ -146,6 +146,11 @@ func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
|
||||
*out = make([]HPAScalingPolicy, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Tolerance != nil {
|
||||
in, out := &in.Tolerance, &out.Tolerance
|
||||
x := (*in).DeepCopy()
|
||||
*out = &x
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package batch // import "k8s.io/kubernetes/pkg/apis/batch"
|
||||
package batch
|
||||
|
11
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
generated
vendored
11
e2e/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go
generated
vendored
@ -132,7 +132,6 @@ const (
|
||||
// This is an action which might be taken on a pod failure - mark the
|
||||
// Job's index as failed to avoid restarts within this index. This action
|
||||
// can only be used when backoffLimitPerIndex is set.
|
||||
// This value is beta-level.
|
||||
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
|
||||
|
||||
// This is an action which might be taken on a pod failure - the counter towards
|
||||
@ -226,8 +225,6 @@ type PodFailurePolicyRule struct {
|
||||
// running pods are terminated.
|
||||
// - FailIndex: indicates that the pod's index is marked as Failed and will
|
||||
// not be restarted.
|
||||
// This value is alpha-level. It can be used when the
|
||||
// `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).
|
||||
// - Ignore: indicates that the counter towards the .backoffLimit is not
|
||||
// incremented and a replacement pod is created.
|
||||
// - Count: indicates that the pod is handled in the default way - the
|
||||
@ -339,8 +336,6 @@ type JobSpec struct {
|
||||
// When the field is specified, it must be immutable and works only for the Indexed Jobs.
|
||||
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
|
||||
//
|
||||
// This field is beta-level. To use this field, you must enable the
|
||||
// `JobSuccessPolicy` feature gate (enabled by default).
|
||||
// +optional
|
||||
SuccessPolicy *SuccessPolicy
|
||||
|
||||
@ -363,8 +358,6 @@ type JobSpec struct {
|
||||
// batch.kubernetes.io/job-index-failure-count annotation. It can only
|
||||
// be set when Job's completionMode=Indexed, and the Pod's restart
|
||||
// policy is Never. The field is immutable.
|
||||
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (enabled by default).
|
||||
// +optional
|
||||
BackoffLimitPerIndex *int32
|
||||
|
||||
@ -376,8 +369,6 @@ type JobSpec struct {
|
||||
// It can only be specified when backoffLimitPerIndex is set.
|
||||
// It can be null or up to completions. It is required and must be
|
||||
// less than or equal to 10^4 when is completions greater than 10^5.
|
||||
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (enabled by default).
|
||||
// +optional
|
||||
MaxFailedIndexes *int32
|
||||
|
||||
@ -571,8 +562,6 @@ type JobStatus struct {
|
||||
// represented as "1,3-5,7".
|
||||
// The set of failed indexes cannot overlap with the set of completed indexes.
|
||||
//
|
||||
// This field is beta-level. It can be used when the `JobBackoffLimitPerIndex`
|
||||
// feature gate is enabled (enabled by default).
|
||||
// +optional
|
||||
FailedIndexes *string
|
||||
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
generated
vendored
@ -22,4 +22,4 @@ limitations under the License.
|
||||
// The contract presented to clients is located in the versioned packages,
|
||||
// which are sub-directories. The first one is "v1". Those packages
|
||||
// describe how a particular version is serialized to storage/network.
|
||||
package core // import "k8s.io/kubernetes/pkg/apis/core"
|
||||
package core
|
||||
|
26
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
generated
vendored
26
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
generated
vendored
@ -119,6 +119,7 @@ var standardResourceQuotaScopes = sets.New(
|
||||
core.ResourceQuotaScopeBestEffort,
|
||||
core.ResourceQuotaScopeNotBestEffort,
|
||||
core.ResourceQuotaScopePriorityClass,
|
||||
core.ResourceQuotaScopeVolumeAttributesClass,
|
||||
)
|
||||
|
||||
// IsStandardResourceQuotaScope returns true if the scope is a standard value
|
||||
@ -139,6 +140,14 @@ var podComputeQuotaResources = sets.New(
|
||||
core.ResourceRequestsMemory,
|
||||
)
|
||||
|
||||
var pvcObjectCountQuotaResources = sets.New(
|
||||
core.ResourcePersistentVolumeClaims,
|
||||
)
|
||||
|
||||
var pvcStorageQuotaResources = sets.New(
|
||||
core.ResourceRequestsStorage,
|
||||
)
|
||||
|
||||
// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
|
||||
func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource core.ResourceName) bool {
|
||||
switch scope {
|
||||
@ -147,6 +156,8 @@ func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resourc
|
||||
return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
|
||||
case core.ResourceQuotaScopeBestEffort:
|
||||
return podObjectCountQuotaResources.Has(resource)
|
||||
case core.ResourceQuotaScopeVolumeAttributesClass:
|
||||
return pvcObjectCountQuotaResources.Has(resource) || pvcStorageQuotaResources.Has(resource)
|
||||
default:
|
||||
return true
|
||||
}
|
||||
@ -500,3 +511,18 @@ func validFirstDigit(str string) bool {
|
||||
}
|
||||
return str[0] == '-' || (str[0] == '0' && str == "0") || (str[0] >= '1' && str[0] <= '9')
|
||||
}
|
||||
|
||||
// HasInvalidLabelValueInNodeSelectorTerms checks if there's an invalid label value
|
||||
// in one NodeSelectorTerm's MatchExpression values
|
||||
func HasInvalidLabelValueInNodeSelectorTerms(terms []core.NodeSelectorTerm) bool {
|
||||
for _, term := range terms {
|
||||
for _, expression := range term.MatchExpressions {
|
||||
for _, value := range expression.Values {
|
||||
if len(validation.IsValidLabelValue(value)) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
184
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
generated
vendored
184
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
generated
vendored
@ -220,7 +220,7 @@ type VolumeSource struct {
|
||||
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
|
||||
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
|
||||
// The volume will be mounted read-only (ro) and non-executable files (noexec).
|
||||
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
|
||||
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
|
||||
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
|
||||
// +featureGate=ImageVolume
|
||||
// +optional
|
||||
@ -531,8 +531,8 @@ type PersistentVolumeClaimSpec struct {
|
||||
// * An existing PVC (PersistentVolumeClaim)
|
||||
// If the provisioner or an external controller can support the specified data source,
|
||||
// it will create a new volume based on the contents of the specified data source.
|
||||
// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
|
||||
// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
|
||||
// dataSource contents will be copied to dataSourceRef, and dataSourceRef contents
|
||||
// will be copied to dataSource when dataSourceRef.namespace is not specified.
|
||||
// If the namespace is specified, then dataSourceRef will not be copied to dataSource.
|
||||
// +optional
|
||||
DataSource *TypedLocalObjectReference
|
||||
@ -557,8 +557,6 @@ type PersistentVolumeClaimSpec struct {
|
||||
// specified.
|
||||
// * While dataSource only allows local objects, dataSourceRef allows objects
|
||||
// in any namespaces.
|
||||
// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
|
||||
// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
|
||||
// +optional
|
||||
DataSourceRef *TypedObjectReference
|
||||
// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
|
||||
@ -2263,9 +2261,9 @@ type SecretKeySelector struct {
|
||||
Optional *bool
|
||||
}
|
||||
|
||||
// EnvFromSource represents the source of a set of ConfigMaps
|
||||
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
|
||||
type EnvFromSource struct {
|
||||
// An optional identifier to prepend to each key in the ConfigMap.
|
||||
// Optional text to prepend to the name of each environment variable. Must be a C_IDENTIFIER.
|
||||
// +optional
|
||||
Prefix string
|
||||
// The ConfigMap to select from.
|
||||
@ -2675,6 +2673,78 @@ type GRPCAction struct {
|
||||
Service *string
|
||||
}
|
||||
|
||||
// Signal defines the stop signal of containers
|
||||
// +enum
|
||||
type Signal string
|
||||
|
||||
const (
|
||||
SIGABRT Signal = "SIGABRT"
|
||||
SIGALRM Signal = "SIGALRM"
|
||||
SIGBUS Signal = "SIGBUS"
|
||||
SIGCHLD Signal = "SIGCHLD"
|
||||
SIGCLD Signal = "SIGCLD"
|
||||
SIGCONT Signal = "SIGCONT"
|
||||
SIGFPE Signal = "SIGFPE"
|
||||
SIGHUP Signal = "SIGHUP"
|
||||
SIGILL Signal = "SIGILL"
|
||||
SIGINT Signal = "SIGINT"
|
||||
SIGIO Signal = "SIGIO"
|
||||
SIGIOT Signal = "SIGIOT"
|
||||
SIGKILL Signal = "SIGKILL"
|
||||
SIGPIPE Signal = "SIGPIPE"
|
||||
SIGPOLL Signal = "SIGPOLL"
|
||||
SIGPROF Signal = "SIGPROF"
|
||||
SIGPWR Signal = "SIGPWR"
|
||||
SIGQUIT Signal = "SIGQUIT"
|
||||
SIGSEGV Signal = "SIGSEGV"
|
||||
SIGSTKFLT Signal = "SIGSTKFLT"
|
||||
SIGSTOP Signal = "SIGSTOP"
|
||||
SIGSYS Signal = "SIGSYS"
|
||||
SIGTERM Signal = "SIGTERM"
|
||||
SIGTRAP Signal = "SIGTRAP"
|
||||
SIGTSTP Signal = "SIGTSTP"
|
||||
SIGTTIN Signal = "SIGTTIN"
|
||||
SIGTTOU Signal = "SIGTTOU"
|
||||
SIGURG Signal = "SIGURG"
|
||||
SIGUSR1 Signal = "SIGUSR1"
|
||||
SIGUSR2 Signal = "SIGUSR2"
|
||||
SIGVTALRM Signal = "SIGVTALRM"
|
||||
SIGWINCH Signal = "SIGWINCH"
|
||||
SIGXCPU Signal = "SIGXCPU"
|
||||
SIGXFSZ Signal = "SIGXFSZ"
|
||||
SIGRTMIN Signal = "SIGRTMIN"
|
||||
SIGRTMINPLUS1 Signal = "SIGRTMIN+1"
|
||||
SIGRTMINPLUS2 Signal = "SIGRTMIN+2"
|
||||
SIGRTMINPLUS3 Signal = "SIGRTMIN+3"
|
||||
SIGRTMINPLUS4 Signal = "SIGRTMIN+4"
|
||||
SIGRTMINPLUS5 Signal = "SIGRTMIN+5"
|
||||
SIGRTMINPLUS6 Signal = "SIGRTMIN+6"
|
||||
SIGRTMINPLUS7 Signal = "SIGRTMIN+7"
|
||||
SIGRTMINPLUS8 Signal = "SIGRTMIN+8"
|
||||
SIGRTMINPLUS9 Signal = "SIGRTMIN+9"
|
||||
SIGRTMINPLUS10 Signal = "SIGRTMIN+10"
|
||||
SIGRTMINPLUS11 Signal = "SIGRTMIN+11"
|
||||
SIGRTMINPLUS12 Signal = "SIGRTMIN+12"
|
||||
SIGRTMINPLUS13 Signal = "SIGRTMIN+13"
|
||||
SIGRTMINPLUS14 Signal = "SIGRTMIN+14"
|
||||
SIGRTMINPLUS15 Signal = "SIGRTMIN+15"
|
||||
SIGRTMAXMINUS14 Signal = "SIGRTMAX-14"
|
||||
SIGRTMAXMINUS13 Signal = "SIGRTMAX-13"
|
||||
SIGRTMAXMINUS12 Signal = "SIGRTMAX-12"
|
||||
SIGRTMAXMINUS11 Signal = "SIGRTMAX-11"
|
||||
SIGRTMAXMINUS10 Signal = "SIGRTMAX-10"
|
||||
SIGRTMAXMINUS9 Signal = "SIGRTMAX-9"
|
||||
SIGRTMAXMINUS8 Signal = "SIGRTMAX-8"
|
||||
SIGRTMAXMINUS7 Signal = "SIGRTMAX-7"
|
||||
SIGRTMAXMINUS6 Signal = "SIGRTMAX-6"
|
||||
SIGRTMAXMINUS5 Signal = "SIGRTMAX-5"
|
||||
SIGRTMAXMINUS4 Signal = "SIGRTMAX-4"
|
||||
SIGRTMAXMINUS3 Signal = "SIGRTMAX-3"
|
||||
SIGRTMAXMINUS2 Signal = "SIGRTMAX-2"
|
||||
SIGRTMAXMINUS1 Signal = "SIGRTMAX-1"
|
||||
SIGRTMAX Signal = "SIGRTMAX"
|
||||
)
|
||||
|
||||
// Lifecycle describes actions that the management system should take in response to container lifecycle
|
||||
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
|
||||
// until the action is complete, unless the container process fails, in which case the handler is aborted.
|
||||
@ -2695,6 +2765,11 @@ type Lifecycle struct {
|
||||
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
|
||||
// +optional
|
||||
PreStop *LifecycleHandler
|
||||
// StopSignal defines which signal will be sent to a container when it is being stopped.
|
||||
// If not specified, the default is defined by the container runtime in use.
|
||||
// StopSignal can only be set for Pods with a non-empty .spec.os.name
|
||||
// +optional
|
||||
StopSignal *Signal
|
||||
}
|
||||
|
||||
// The below types are used by kube_client and api_server.
|
||||
@ -2833,6 +2908,12 @@ type ContainerStatus struct {
|
||||
// +featureGate=ResourceHealthStatus
|
||||
// +optional
|
||||
AllocatedResourcesStatus []ResourceStatus
|
||||
// StopSignal is the signal which will be sent to this container when it is stopped. This might be
|
||||
// the stop signal specified by the user, the signal specified in the container image, or the default
|
||||
// stop signal of the container runtime on this node.
|
||||
// +featureGate=ContainerStopSignals
|
||||
// +optional
|
||||
StopSignal *Signal
|
||||
}
|
||||
|
||||
type ResourceStatus struct {
|
||||
@ -2953,12 +3034,26 @@ const (
|
||||
// DisruptionTarget indicates the pod is about to be terminated due to a
|
||||
// disruption (such as preemption, eviction API or garbage-collection).
|
||||
DisruptionTarget PodConditionType = "DisruptionTarget"
|
||||
// PodResizePending indicates that the pod has been resized, but kubelet has not
|
||||
// yet allocated the resources. If both PodResizePending and PodResizeInProgress
|
||||
// are set, it means that a new resize was requested in the middle of a previous
|
||||
// pod resize that is still in progress.
|
||||
PodResizePending PodConditionType = "PodResizePending"
|
||||
// PodResizeInProgress indicates that a resize is in progress, and is present whenever
|
||||
// the Kubelet has allocated resources for the resize, but has not yet actuated all of
|
||||
// the required changes.
|
||||
// If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
|
||||
// requested in the middle of a previous pod resize that is still in progress.
|
||||
PodResizeInProgress PodConditionType = "PodResizeInProgress"
|
||||
)
|
||||
|
||||
// PodCondition represents pod's condition
|
||||
type PodCondition struct {
|
||||
Type PodConditionType
|
||||
Status ConditionStatus
|
||||
Type PodConditionType
|
||||
// +featureGate=PodObservedGenerationTracking
|
||||
// +optional
|
||||
ObservedGeneration int64
|
||||
Status ConditionStatus
|
||||
// +optional
|
||||
LastProbeTime metav1.Time
|
||||
// +optional
|
||||
@ -2969,12 +3064,10 @@ type PodCondition struct {
|
||||
Message string
|
||||
}
|
||||
|
||||
// PodResizeStatus shows status of desired resize of a pod's containers.
|
||||
// Deprecated: PodResizeStatus shows status of desired resize of a pod's containers.
|
||||
type PodResizeStatus string
|
||||
|
||||
const (
|
||||
// Pod resources resize has been requested and will be evaluated by node.
|
||||
PodResizeStatusProposed PodResizeStatus = "Proposed"
|
||||
// Pod resources resize has been accepted by node and is being actuated.
|
||||
PodResizeStatusInProgress PodResizeStatus = "InProgress"
|
||||
// Node cannot resize the pod at this time and will keep retrying.
|
||||
@ -3258,7 +3351,6 @@ type PodAffinityTerm struct {
|
||||
// pod labels will be ignored. The default value is empty.
|
||||
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
|
||||
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
|
||||
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
//
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
@ -3271,7 +3363,6 @@ type PodAffinityTerm struct {
|
||||
// pod labels will be ignored. The default value is empty.
|
||||
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
|
||||
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
|
||||
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
|
||||
//
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
@ -3959,7 +4050,7 @@ type AppArmorProfile struct {
|
||||
// Localhost - a profile pre-loaded on the node.
|
||||
// RuntimeDefault - the container runtime's default profile.
|
||||
// Unconfined - no AppArmor enforcement.
|
||||
// +unionDescriminator
|
||||
// +unionDiscriminator
|
||||
Type AppArmorProfileType
|
||||
|
||||
// localhostProfile indicates a profile loaded on the node that should be used.
|
||||
@ -4166,6 +4257,11 @@ type EphemeralContainer struct {
|
||||
// PodStatus represents information about the status of a pod. Status may trail the actual
|
||||
// state of a system.
|
||||
type PodStatus struct {
|
||||
// If set, this represents the .metadata.generation that the pod status was set based upon.
|
||||
// This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
|
||||
// +featureGate=PodObservedGenerationTracking
|
||||
// +optional
|
||||
ObservedGeneration int64
|
||||
// +optional
|
||||
Phase PodPhase
|
||||
// +optional
|
||||
@ -4247,6 +4343,9 @@ type PodStatus struct {
|
||||
// Status of resources resize desired for pod's containers.
|
||||
// It is empty if no resources resize is pending.
|
||||
// Any changes to container resources will automatically set this to "Proposed"
|
||||
// Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
|
||||
// PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
|
||||
// PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
|
||||
// +featureGate=InPlacePodVerticalScaling
|
||||
// +optional
|
||||
Resize PodResizeStatus
|
||||
@ -4328,7 +4427,7 @@ type PodTemplateList struct {
|
||||
// a TemplateRef or a Template set.
|
||||
type ReplicationControllerSpec struct {
|
||||
// Replicas is the number of desired replicas.
|
||||
Replicas int32
|
||||
Replicas *int32
|
||||
|
||||
// Minimum number of seconds for which a newly created pod should be ready
|
||||
// without any of its container crashing, for it to be considered available.
|
||||
@ -4544,14 +4643,27 @@ const (
|
||||
|
||||
// These are valid values for the TrafficDistribution field of a Service.
|
||||
const (
|
||||
// Indicates a preference for routing traffic to endpoints that are
|
||||
// topologically proximate to the client. The interpretation of "topologically
|
||||
// proximate" may vary across implementations and could encompass endpoints
|
||||
// within the same node, rack, zone, or even region. Setting this value gives
|
||||
// implementations permission to make different tradeoffs, e.g. optimizing for
|
||||
// proximity rather than equal distribution of load. Users should not set this
|
||||
// value if such tradeoffs are not acceptable.
|
||||
// Indicates a preference for routing traffic to endpoints that are in the same
|
||||
// zone as the client. Users should not set this value unless they have ensured
|
||||
// that clients and endpoints are distributed in such a way that the "same zone"
|
||||
// preference will not result in endpoints getting overloaded.
|
||||
ServiceTrafficDistributionPreferClose = "PreferClose"
|
||||
|
||||
// Indicates a preference for routing traffic to endpoints that are in the same
|
||||
// zone as the client. Users should not set this value unless they have ensured
|
||||
// that clients and endpoints are distributed in such a way that the "same zone"
|
||||
// preference will not result in endpoints getting overloaded.
|
||||
// This is an alias for "PreferClose", but it is an Alpha feature and is only
|
||||
// recognized if the PreferSameTrafficDistribution feature gate is enabled.
|
||||
ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
|
||||
|
||||
// Indicates a preference for routing traffic to endpoints that are on the same
|
||||
// node as the client. Users should not set this value unless they have ensured
|
||||
// that clients and endpoints are distributed in such a way that the "same node"
|
||||
// preference will not result in endpoints getting overloaded.
|
||||
// This is an Alpha feature and is only recognized if the
|
||||
// PreferSameTrafficDistribution feature gate is enabled.
|
||||
ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
|
||||
)
|
||||
|
||||
// These are the valid conditions of a service.
|
||||
@ -4818,12 +4930,12 @@ type ServiceSpec struct {
|
||||
// +optional
|
||||
InternalTrafficPolicy *ServiceInternalTrafficPolicy
|
||||
|
||||
// TrafficDistribution offers a way to express preferences for how traffic is
|
||||
// distributed to Service endpoints. Implementations can use this field as a
|
||||
// hint, but are not required to guarantee strict adherence. If the field is
|
||||
// not set, the implementation will apply its default routing strategy. If set
|
||||
// to "PreferClose", implementations should prioritize endpoints that are
|
||||
// topologically close (e.g., same zone).
|
||||
// TrafficDistribution offers a way to express preferences for how traffic
|
||||
// is distributed to Service endpoints. Implementations can use this field
|
||||
// as a hint, but are not required to guarantee strict adherence. If the
|
||||
// field is not set, the implementation will apply its default routing
|
||||
// strategy. If set to "PreferClose", implementations should prioritize
|
||||
// endpoints that are in the same zone.
|
||||
// +optional
|
||||
TrafficDistribution *string
|
||||
}
|
||||
@ -5174,6 +5286,15 @@ type NodeSystemInfo struct {
|
||||
OperatingSystem string
|
||||
// The Architecture reported by the node
|
||||
Architecture string
|
||||
// Swap Info reported by the node.
|
||||
Swap *NodeSwapStatus
|
||||
}
|
||||
|
||||
// NodeSwapStatus represents swap memory information.
|
||||
type NodeSwapStatus struct {
|
||||
// Total amount of swap memory in bytes.
|
||||
// +optional
|
||||
Capacity *int64
|
||||
}
|
||||
|
||||
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
|
||||
@ -6046,6 +6167,9 @@ const (
|
||||
ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
|
||||
// Match all pod objects that have cross-namespace pod (anti)affinity mentioned
|
||||
ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
|
||||
|
||||
// Match all pvc objects that have volume attributes class mentioned.
|
||||
ResourceQuotaScopeVolumeAttributesClass ResourceQuotaScope = "VolumeAttributesClass"
|
||||
)
|
||||
|
||||
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota
|
||||
@ -6682,7 +6806,6 @@ type TopologySpreadConstraint struct {
|
||||
// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
|
||||
//
|
||||
// If this value is nil, the behavior is equivalent to the Honor policy.
|
||||
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
// +optional
|
||||
NodeAffinityPolicy *NodeInclusionPolicy
|
||||
// NodeTaintsPolicy indicates how we will treat node taints when calculating
|
||||
@ -6692,7 +6815,6 @@ type TopologySpreadConstraint struct {
|
||||
// - Ignore: node taints are ignored. All nodes are included.
|
||||
//
|
||||
// If this value is nil, the behavior is equivalent to the Ignore policy.
|
||||
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
|
||||
// +optional
|
||||
NodeTaintsPolicy *NodeInclusionPolicy
|
||||
// MatchLabelKeys is a set of pod label keys to select the pods over which
|
||||
|
8
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go
generated
vendored
8
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go
generated
vendored
@ -199,7 +199,9 @@ func Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *apps.Re
|
||||
}
|
||||
|
||||
func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error {
|
||||
out.Replicas = &in.Replicas
|
||||
if err := autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
out.Selector = in.Selector
|
||||
if in.Template != nil {
|
||||
@ -214,8 +216,8 @@ func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *
|
||||
}
|
||||
|
||||
func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error {
|
||||
if in.Replicas != nil {
|
||||
out.Replicas = *in.Replicas
|
||||
if err := autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
out.Selector = in.Selector
|
||||
|
77
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go
generated
vendored
77
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
resourcehelper "k8s.io/component-helpers/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1/service"
|
||||
corev1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/util/parsers"
|
||||
)
|
||||
@ -60,10 +61,7 @@ func SetDefaults_ReplicationController(obj *v1.ReplicationController) {
|
||||
obj.Labels = labels
|
||||
}
|
||||
}
|
||||
if obj.Spec.Replicas == nil {
|
||||
obj.Spec.Replicas = new(int32)
|
||||
*obj.Spec.Replicas = 1
|
||||
}
|
||||
// obj.Spec.Replicas is defaulted declaratively
|
||||
}
|
||||
func SetDefaults_Volume(obj *v1.Volume) {
|
||||
if ptr.AllPtrFieldsNil(&obj.VolumeSource) {
|
||||
@ -182,29 +180,6 @@ func SetDefaults_Pod(obj *v1.Pod) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
|
||||
obj.Spec.Containers[i].Resources.Requests != nil {
|
||||
// For normal containers, set resize restart policy to default value (NotRequired), if not specified.
|
||||
resizePolicySpecified := make(map[v1.ResourceName]bool)
|
||||
for _, p := range obj.Spec.Containers[i].ResizePolicy {
|
||||
resizePolicySpecified[p.ResourceName] = true
|
||||
}
|
||||
setDefaultResizePolicy := func(resourceName v1.ResourceName) {
|
||||
if _, found := resizePolicySpecified[resourceName]; !found {
|
||||
obj.Spec.Containers[i].ResizePolicy = append(obj.Spec.Containers[i].ResizePolicy,
|
||||
v1.ContainerResizePolicy{
|
||||
ResourceName: resourceName,
|
||||
RestartPolicy: v1.NotRequired,
|
||||
})
|
||||
}
|
||||
}
|
||||
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceCPU]; exists {
|
||||
setDefaultResizePolicy(v1.ResourceCPU)
|
||||
}
|
||||
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceMemory]; exists {
|
||||
setDefaultResizePolicy(v1.ResourceMemory)
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := range obj.Spec.InitContainers {
|
||||
if obj.Spec.InitContainers[i].Resources.Limits != nil {
|
||||
@ -222,6 +197,7 @@ func SetDefaults_Pod(obj *v1.Pod) {
|
||||
// Pod Requests default values must be applied after container-level default values
|
||||
// have been populated.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) {
|
||||
defaultHugePagePodLimits(obj)
|
||||
defaultPodRequests(obj)
|
||||
}
|
||||
|
||||
@ -479,7 +455,9 @@ func defaultPodRequests(obj *v1.Pod) {
|
||||
// PodLevelResources feature) and pod-level requests are not set, the pod-level requests
|
||||
// default to the effective requests of all the containers for that resource.
|
||||
for key, aggrCtrLim := range aggrCtrReqs {
|
||||
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
|
||||
// Defaulting for pod level hugepages requests takes them directly from the pod limit,
|
||||
// hugepages cannot be overcommited and must have the limit, so we skip them here.
|
||||
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) && !corev1helper.IsHugePageResourceName(key) {
|
||||
podReqs[key] = aggrCtrLim.DeepCopy()
|
||||
}
|
||||
}
|
||||
@ -487,6 +465,8 @@ func defaultPodRequests(obj *v1.Pod) {
|
||||
// When no containers specify requests for a resource, the pod-level requests
|
||||
// will default to match the pod-level limits, if pod-level
|
||||
// limits exist for that resource.
|
||||
// Defaulting for pod level hugepages requests is dependent on defaultHugePagePodLimits,
|
||||
// if defaultHugePagePodLimits defined the limit, the request will be set here.
|
||||
for key, podLim := range obj.Spec.Resources.Limits {
|
||||
if _, exists := podReqs[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) {
|
||||
podReqs[key] = podLim.DeepCopy()
|
||||
@ -499,3 +479,44 @@ func defaultPodRequests(obj *v1.Pod) {
|
||||
obj.Spec.Resources.Requests = podReqs
|
||||
}
|
||||
}
|
||||
|
||||
// defaultHugePagePodLimits applies default values for pod-level limits, only when
|
||||
// container hugepage limits are set, but not at pod level, in following
|
||||
// scenario:
|
||||
// 1. When at least one container (regular, init or sidecar) has hugepage
|
||||
// limits set:
|
||||
// The pod-level limit becomes equal to the aggregated hugepages limit of all
|
||||
// the containers in the pod.
|
||||
func defaultHugePagePodLimits(obj *v1.Pod) {
|
||||
// We only populate defaults when the pod-level resources are partly specified already.
|
||||
if obj.Spec.Resources == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(obj.Spec.Resources.Limits) == 0 && len(obj.Spec.Resources.Requests) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var podLims v1.ResourceList
|
||||
podLims = obj.Spec.Resources.Limits
|
||||
if podLims == nil {
|
||||
podLims = make(v1.ResourceList)
|
||||
}
|
||||
|
||||
aggrCtrLims := resourcehelper.AggregateContainerLimits(obj, resourcehelper.PodResourcesOptions{})
|
||||
|
||||
// When containers specify limits for hugepages and pod-level limits are not
|
||||
// set for that resource, the pod-level limit will default to the aggregated
|
||||
// hugepages limit of all the containers.
|
||||
for key, aggrCtrLim := range aggrCtrLims {
|
||||
if _, exists := podLims[key]; !exists && resourcehelper.IsSupportedPodLevelResource(key) && corev1helper.IsHugePageResourceName(key) {
|
||||
podLims[key] = aggrCtrLim.DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// Only set pod-level resource limits in the PodSpec if the requirements map
|
||||
// contains entries after collecting container-level limits and pod-level limits for hugepages.
|
||||
if len(podLims) > 0 {
|
||||
obj.Spec.Resources.Limits = podLims
|
||||
}
|
||||
}
|
||||
|
4
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go
generated
vendored
4
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go
generated
vendored
@ -18,6 +18,8 @@ limitations under the License.
|
||||
// +k8s:conversion-gen-external-types=k8s.io/api/core/v1
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
// +k8s:defaulter-gen-input=k8s.io/api/core/v1
|
||||
// +k8s:validation-gen=TypeMeta
|
||||
// +k8s:validation-gen-input=k8s.io/api/core/v1
|
||||
|
||||
// Package v1 is the v1 version of the API.
|
||||
package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
package v1
|
||||
|
48
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
generated
vendored
48
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
generated
vendored
@ -1162,6 +1162,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*corev1.NodeSwapStatus)(nil), (*core.NodeSwapStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_NodeSwapStatus_To_core_NodeSwapStatus(a.(*corev1.NodeSwapStatus), b.(*core.NodeSwapStatus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*core.NodeSwapStatus)(nil), (*corev1.NodeSwapStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_core_NodeSwapStatus_To_v1_NodeSwapStatus(a.(*core.NodeSwapStatus), b.(*corev1.NodeSwapStatus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*corev1.NodeSystemInfo)(nil), (*core.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(a.(*corev1.NodeSystemInfo), b.(*core.NodeSystemInfo), scope)
|
||||
}); err != nil {
|
||||
@ -3405,6 +3415,7 @@ func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *corev1.Container
|
||||
out.VolumeMounts = *(*[]core.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts))
|
||||
out.User = (*core.ContainerUser)(unsafe.Pointer(in.User))
|
||||
out.AllocatedResourcesStatus = *(*[]core.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus))
|
||||
out.StopSignal = (*core.Signal)(unsafe.Pointer(in.StopSignal))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -3432,6 +3443,7 @@ func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerSt
|
||||
out.VolumeMounts = *(*[]corev1.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts))
|
||||
out.User = (*corev1.ContainerUser)(unsafe.Pointer(in.User))
|
||||
out.AllocatedResourcesStatus = *(*[]corev1.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus))
|
||||
out.StopSignal = (*corev1.Signal)(unsafe.Pointer(in.StopSignal))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4493,6 +4505,7 @@ func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *corev1.KeyT
|
||||
func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *corev1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error {
|
||||
out.PostStart = (*core.LifecycleHandler)(unsafe.Pointer(in.PostStart))
|
||||
out.PreStop = (*core.LifecycleHandler)(unsafe.Pointer(in.PreStop))
|
||||
out.StopSignal = (*core.Signal)(unsafe.Pointer(in.StopSignal))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4504,6 +4517,7 @@ func Convert_v1_Lifecycle_To_core_Lifecycle(in *corev1.Lifecycle, out *core.Life
|
||||
func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *corev1.Lifecycle, s conversion.Scope) error {
|
||||
out.PostStart = (*corev1.LifecycleHandler)(unsafe.Pointer(in.PostStart))
|
||||
out.PreStop = (*corev1.LifecycleHandler)(unsafe.Pointer(in.PreStop))
|
||||
out.StopSignal = (*corev1.Signal)(unsafe.Pointer(in.StopSignal))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5419,6 +5433,26 @@ func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *corev1.N
|
||||
return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_NodeSwapStatus_To_core_NodeSwapStatus(in *corev1.NodeSwapStatus, out *core.NodeSwapStatus, s conversion.Scope) error {
|
||||
out.Capacity = (*int64)(unsafe.Pointer(in.Capacity))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_NodeSwapStatus_To_core_NodeSwapStatus is an autogenerated conversion function.
|
||||
func Convert_v1_NodeSwapStatus_To_core_NodeSwapStatus(in *corev1.NodeSwapStatus, out *core.NodeSwapStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1_NodeSwapStatus_To_core_NodeSwapStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_NodeSwapStatus_To_v1_NodeSwapStatus(in *core.NodeSwapStatus, out *corev1.NodeSwapStatus, s conversion.Scope) error {
|
||||
out.Capacity = (*int64)(unsafe.Pointer(in.Capacity))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_NodeSwapStatus_To_v1_NodeSwapStatus is an autogenerated conversion function.
|
||||
func Convert_core_NodeSwapStatus_To_v1_NodeSwapStatus(in *core.NodeSwapStatus, out *corev1.NodeSwapStatus, s conversion.Scope) error {
|
||||
return autoConvert_core_NodeSwapStatus_To_v1_NodeSwapStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *corev1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error {
|
||||
out.MachineID = in.MachineID
|
||||
out.SystemUUID = in.SystemUUID
|
||||
@ -5430,6 +5464,7 @@ func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *corev1.NodeSystemI
|
||||
out.KubeProxyVersion = in.KubeProxyVersion
|
||||
out.OperatingSystem = in.OperatingSystem
|
||||
out.Architecture = in.Architecture
|
||||
out.Swap = (*core.NodeSwapStatus)(unsafe.Pointer(in.Swap))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5449,6 +5484,7 @@ func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInf
|
||||
out.KubeProxyVersion = in.KubeProxyVersion
|
||||
out.OperatingSystem = in.OperatingSystem
|
||||
out.Architecture = in.Architecture
|
||||
out.Swap = (*corev1.NodeSwapStatus)(unsafe.Pointer(in.Swap))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -6105,6 +6141,7 @@ func Convert_url_Values_To_v1_PodAttachOptions(in *url.Values, out *corev1.PodAt
|
||||
|
||||
func autoConvert_v1_PodCondition_To_core_PodCondition(in *corev1.PodCondition, out *core.PodCondition, s conversion.Scope) error {
|
||||
out.Type = core.PodConditionType(in.Type)
|
||||
out.ObservedGeneration = in.ObservedGeneration
|
||||
out.Status = core.ConditionStatus(in.Status)
|
||||
out.LastProbeTime = in.LastProbeTime
|
||||
out.LastTransitionTime = in.LastTransitionTime
|
||||
@ -6120,6 +6157,7 @@ func Convert_v1_PodCondition_To_core_PodCondition(in *corev1.PodCondition, out *
|
||||
|
||||
func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *corev1.PodCondition, s conversion.Scope) error {
|
||||
out.Type = corev1.PodConditionType(in.Type)
|
||||
out.ObservedGeneration = in.ObservedGeneration
|
||||
out.Status = corev1.ConditionStatus(in.Status)
|
||||
out.LastProbeTime = in.LastProbeTime
|
||||
out.LastTransitionTime = in.LastTransitionTime
|
||||
@ -6811,6 +6849,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *corev1.PodSpe
|
||||
}
|
||||
|
||||
func autoConvert_v1_PodStatus_To_core_PodStatus(in *corev1.PodStatus, out *core.PodStatus, s conversion.Scope) error {
|
||||
out.ObservedGeneration = in.ObservedGeneration
|
||||
out.Phase = core.PodPhase(in.Phase)
|
||||
out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions))
|
||||
out.Message = in.Message
|
||||
@ -6831,6 +6870,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *corev1.PodStatus, out *core.
|
||||
}
|
||||
|
||||
func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *corev1.PodStatus, s conversion.Scope) error {
|
||||
out.ObservedGeneration = in.ObservedGeneration
|
||||
out.Phase = corev1.PodPhase(in.Phase)
|
||||
out.Conditions = *(*[]corev1.PodCondition)(unsafe.Pointer(&in.Conditions))
|
||||
out.Message = in.Message
|
||||
@ -7412,9 +7452,7 @@ func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *
|
||||
}
|
||||
|
||||
func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *corev1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error {
|
||||
if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Replicas = (*int32)(unsafe.Pointer(in.Replicas))
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
|
||||
if in.Template != nil {
|
||||
@ -7430,9 +7468,7 @@ func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(
|
||||
}
|
||||
|
||||
func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *corev1.ReplicationControllerSpec, s conversion.Scope) error {
|
||||
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.Replicas = (*int32)(unsafe.Pointer(in.Replicas))
|
||||
out.MinReadySeconds = in.MinReadySeconds
|
||||
out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector))
|
||||
if in.Template != nil {
|
||||
|
4
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go
generated
vendored
4
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go
generated
vendored
@ -878,6 +878,10 @@ func SetObjectDefaults_PodTemplateList(in *corev1.PodTemplateList) {
|
||||
|
||||
func SetObjectDefaults_ReplicationController(in *corev1.ReplicationController) {
|
||||
SetDefaults_ReplicationController(in)
|
||||
if in.Spec.Replicas == nil {
|
||||
var ptrVar1 int32 = 1
|
||||
in.Spec.Replicas = &ptrVar1
|
||||
}
|
||||
if in.Spec.Template != nil {
|
||||
SetDefaults_PodSpec(&in.Spec.Template.Spec)
|
||||
for i := range in.Spec.Template.Spec.Volumes {
|
||||
|
112
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.validations.go
generated
vendored
Normal file
112
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.validations.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by validation-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
operation "k8s.io/apimachinery/pkg/api/operation"
|
||||
safe "k8s.io/apimachinery/pkg/api/safe"
|
||||
validate "k8s.io/apimachinery/pkg/api/validate"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
field "k8s.io/apimachinery/pkg/util/validation/field"
|
||||
)
|
||||
|
||||
func init() { localSchemeBuilder.Register(RegisterValidations) }
|
||||
|
||||
// RegisterValidations adds validation functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterValidations(scheme *runtime.Scheme) error {
|
||||
scheme.AddValidationFunc((*corev1.ReplicationController)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}, subresources ...string) field.ErrorList {
|
||||
if len(subresources) == 0 {
|
||||
return Validate_ReplicationController(ctx, op, nil /* fldPath */, obj.(*corev1.ReplicationController), safe.Cast[*corev1.ReplicationController](oldObj))
|
||||
}
|
||||
if len(subresources) == 1 && subresources[0] == "status" {
|
||||
return nil // corev1.ReplicationControllerStatus has no validation
|
||||
}
|
||||
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresources: %v", obj, subresources))}
|
||||
})
|
||||
scheme.AddValidationFunc((*corev1.ReplicationControllerList)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}, subresources ...string) field.ErrorList {
|
||||
if len(subresources) == 0 {
|
||||
return Validate_ReplicationControllerList(ctx, op, nil /* fldPath */, obj.(*corev1.ReplicationControllerList), safe.Cast[*corev1.ReplicationControllerList](oldObj))
|
||||
}
|
||||
return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresources: %v", obj, subresources))}
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func Validate_ReplicationController(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *corev1.ReplicationController) (errs field.ErrorList) {
|
||||
// field corev1.ReplicationController.TypeMeta has no validation
|
||||
// field corev1.ReplicationController.ObjectMeta has no validation
|
||||
|
||||
// field corev1.ReplicationController.Spec
|
||||
errs = append(errs,
|
||||
func(fldPath *field.Path, obj, oldObj *corev1.ReplicationControllerSpec) (errs field.ErrorList) {
|
||||
errs = append(errs, Validate_ReplicationControllerSpec(ctx, op, fldPath, obj, oldObj)...)
|
||||
return
|
||||
}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *corev1.ReplicationController) *corev1.ReplicationControllerSpec { return &oldObj.Spec }))...)
|
||||
|
||||
// field corev1.ReplicationController.Status has no validation
|
||||
return errs
|
||||
}
|
||||
|
||||
func Validate_ReplicationControllerList(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *corev1.ReplicationControllerList) (errs field.ErrorList) {
|
||||
// field corev1.ReplicationControllerList.TypeMeta has no validation
|
||||
// field corev1.ReplicationControllerList.ListMeta has no validation
|
||||
|
||||
// field corev1.ReplicationControllerList.Items
|
||||
errs = append(errs,
|
||||
func(fldPath *field.Path, obj, oldObj []corev1.ReplicationController) (errs field.ErrorList) {
|
||||
errs = append(errs, validate.EachSliceVal(ctx, op, fldPath, obj, oldObj, nil, Validate_ReplicationController)...)
|
||||
return
|
||||
}(fldPath.Child("items"), obj.Items, safe.Field(oldObj, func(oldObj *corev1.ReplicationControllerList) []corev1.ReplicationController { return oldObj.Items }))...)
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func Validate_ReplicationControllerSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *corev1.ReplicationControllerSpec) (errs field.ErrorList) {
|
||||
// field corev1.ReplicationControllerSpec.Replicas
|
||||
errs = append(errs,
|
||||
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
|
||||
// optional fields with default values are effectively required
|
||||
if e := validate.RequiredPointer(ctx, op, fldPath, obj, oldObj); len(e) != 0 {
|
||||
errs = append(errs, e...)
|
||||
return // do not proceed
|
||||
}
|
||||
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
|
||||
return
|
||||
}(fldPath.Child("replicas"), obj.Replicas, safe.Field(oldObj, func(oldObj *corev1.ReplicationControllerSpec) *int32 { return oldObj.Replicas }))...)
|
||||
|
||||
// field corev1.ReplicationControllerSpec.MinReadySeconds
|
||||
errs = append(errs,
|
||||
func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
|
||||
// optional value-type fields with zero-value defaults are purely documentation
|
||||
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
|
||||
return
|
||||
}(fldPath.Child("minReadySeconds"), &obj.MinReadySeconds, safe.Field(oldObj, func(oldObj *corev1.ReplicationControllerSpec) *int32 { return &oldObj.MinReadySeconds }))...)
|
||||
|
||||
// field corev1.ReplicationControllerSpec.Selector has no validation
|
||||
// field corev1.ReplicationControllerSpec.Template has no validation
|
||||
return errs
|
||||
}
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// Package validation has functions for validating the correctness of api
|
||||
// objects and explaining what is wrong with them when they aren't valid.
|
||||
package validation // import "k8s.io/kubernetes/pkg/apis/core/validation"
|
||||
package validation
|
||||
|
628
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
generated
vendored
628
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
generated
vendored
File diff suppressed because it is too large
Load Diff
43
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
43
e2e/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
@ -1055,6 +1055,11 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.StopSignal != nil {
|
||||
in, out := &in.StopSignal, &out.StopSignal
|
||||
*out = new(Signal)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -2101,6 +2106,11 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
|
||||
*out = new(LifecycleHandler)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.StopSignal != nil {
|
||||
in, out := &in.StopSignal, &out.StopSignal
|
||||
*out = new(Signal)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -3004,7 +3014,7 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
|
||||
copy(*out, *in)
|
||||
}
|
||||
out.DaemonEndpoints = in.DaemonEndpoints
|
||||
out.NodeInfo = in.NodeInfo
|
||||
in.NodeInfo.DeepCopyInto(&out.NodeInfo)
|
||||
if in.Images != nil {
|
||||
in, out := &in.Images, &out.Images
|
||||
*out = make([]ContainerImage, len(*in))
|
||||
@ -3052,9 +3062,35 @@ func (in *NodeStatus) DeepCopy() *NodeStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeSwapStatus) DeepCopyInto(out *NodeSwapStatus) {
|
||||
*out = *in
|
||||
if in.Capacity != nil {
|
||||
in, out := &in.Capacity, &out.Capacity
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSwapStatus.
|
||||
func (in *NodeSwapStatus) DeepCopy() *NodeSwapStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeSwapStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
|
||||
*out = *in
|
||||
if in.Swap != nil {
|
||||
in, out := &in.Swap, &out.Swap
|
||||
*out = new(NodeSwapStatus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -4920,6 +4956,11 @@ func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
|
||||
*out = *in
|
||||
if in.Replicas != nil {
|
||||
in, out := &in.Replicas, &out.Replicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = make(map[string]string, len(*in))
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// +k8s:deepcopy-gen=package
|
||||
|
||||
package extensions // import "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
package extensions
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go
generated
vendored
@ -17,4 +17,4 @@ limitations under the License.
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=networking.k8s.io
|
||||
|
||||
package networking // import "k8s.io/kubernetes/pkg/apis/networking"
|
||||
package networking
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go
generated
vendored
@ -17,4 +17,4 @@ limitations under the License.
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=scheduling.k8s.io
|
||||
|
||||
package scheduling // import "k8s.io/kubernetes/pkg/apis/scheduling"
|
||||
package scheduling
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go
generated
vendored
@ -15,4 +15,4 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// Package capabilities manages system level capabilities
|
||||
package capabilities // import "k8s.io/kubernetes/pkg/capabilities"
|
||||
package capabilities
|
||||
|
54
e2e/vendor/k8s.io/kubernetes/pkg/cluster/ports/ports.go
generated
vendored
54
e2e/vendor/k8s.io/kubernetes/pkg/cluster/ports/ports.go
generated
vendored
@ -1,54 +0,0 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ports
|
||||
|
||||
import (
|
||||
cpoptions "k8s.io/cloud-provider/options"
|
||||
)
|
||||
|
||||
// In this file, we can see all default port of cluster.
|
||||
// It's also an important documentation for us. So don't remove them easily.
|
||||
const (
|
||||
// ProxyStatusPort is the default port for the proxy metrics server.
|
||||
// May be overridden by a flag at startup.
|
||||
ProxyStatusPort = 10249
|
||||
// KubeletPort is the default port for the kubelet server on each host machine.
|
||||
// May be overridden by a flag at startup.
|
||||
KubeletPort = 10250
|
||||
// KubeletReadOnlyPort exposes basic read-only services from the kubelet.
|
||||
// May be overridden by a flag at startup.
|
||||
// This is necessary for heapster to collect monitoring stats from the kubelet
|
||||
// until heapster can transition to using the SSL endpoint.
|
||||
// TODO(roberthbailey): Remove this once we have a better solution for heapster.
|
||||
KubeletReadOnlyPort = 10255
|
||||
// KubeletHealthzPort exposes a healthz endpoint from the kubelet.
|
||||
// May be overridden by a flag at startup.
|
||||
KubeletHealthzPort = 10248
|
||||
// ProxyHealthzPort is the default port for the proxy healthz server.
|
||||
// May be overridden by a flag at startup.
|
||||
ProxyHealthzPort = 10256
|
||||
// KubeControllerManagerPort is the default port for the controller manager status server.
|
||||
// May be overridden by a flag at startup.
|
||||
KubeControllerManagerPort = 10257
|
||||
// CloudControllerManagerPort is the default port for the cloud controller manager server.
|
||||
// This value may be overridden by a flag at startup.
|
||||
CloudControllerManagerPort = 10258
|
||||
// CloudControllerManagerWebhookPort is the default port for the cloud
|
||||
// controller manager webhook server. May be overridden by a flag at
|
||||
// startup.
|
||||
CloudControllerManagerWebhookPort = cpoptions.CloudControllerManagerWebhookPort
|
||||
)
|
73
e2e/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
generated
vendored
73
e2e/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
generated
vendored
@ -83,6 +83,16 @@ const (
|
||||
// The number of batches is given by:
|
||||
// 1+floor(log_2(ceil(N/SlowStartInitialBatchSize)))
|
||||
SlowStartInitialBatchSize = 1
|
||||
|
||||
// PodNodeNameKeyIndex is the name of the index used by PodInformer to index pods by their node name.
|
||||
PodNodeNameKeyIndex = "spec.nodeName"
|
||||
|
||||
// OrphanPodIndexKey is used to index all Orphan pods to this key
|
||||
OrphanPodIndexKey = "_ORPHAN_POD"
|
||||
|
||||
// podControllerUIDIndex is the name for the Pod store's index function,
|
||||
// which is to index by pods's controllerUID.
|
||||
PodControllerUIDIndex = "podControllerUID"
|
||||
)
|
||||
|
||||
var UpdateTaintBackoff = wait.Backoff{
|
||||
@ -973,14 +983,27 @@ func compareMaxContainerRestarts(pi *v1.Pod, pj *v1.Pod) *bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterClaimedPods returns pods that are controlled by the controller and match the selector.
|
||||
func FilterClaimedPods(controller metav1.Object, selector labels.Selector, pods []*v1.Pod) []*v1.Pod {
|
||||
var result []*v1.Pod
|
||||
for _, pod := range pods {
|
||||
if !metav1.IsControlledBy(pod, controller) {
|
||||
// It's an orphan or owned by someone else.
|
||||
continue
|
||||
}
|
||||
if selector.Matches(labels.Set(pod.Labels)) {
|
||||
result = append(result, pod)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// FilterActivePods returns pods that have not terminated.
|
||||
func FilterActivePods(logger klog.Logger, pods []*v1.Pod) []*v1.Pod {
|
||||
var result []*v1.Pod
|
||||
for _, p := range pods {
|
||||
if IsPodActive(p) {
|
||||
result = append(result, p)
|
||||
} else {
|
||||
logger.V(4).Info("Ignoring inactive pod", "pod", klog.KObj(p), "phase", p.Status.Phase, "deletionTime", klog.SafePtr(p.DeletionTimestamp))
|
||||
}
|
||||
}
|
||||
return result
|
||||
@ -1038,6 +1061,52 @@ func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.Repli
|
||||
return filtered
|
||||
}
|
||||
|
||||
// AddPodNodeNameIndexer adds an indexer for Pod's nodeName to the given PodInformer.
|
||||
// This indexer is used to efficiently look up pods by their node name.
|
||||
func AddPodNodeNameIndexer(podInformer cache.SharedIndexInformer) error {
|
||||
if _, exists := podInformer.GetIndexer().GetIndexers()[PodNodeNameKeyIndex]; exists {
|
||||
// indexer already exists, do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
return podInformer.AddIndexers(cache.Indexers{
|
||||
PodNodeNameKeyIndex: func(obj interface{}) ([]string, error) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return []string{}, nil
|
||||
}
|
||||
if len(pod.Spec.NodeName) == 0 {
|
||||
return []string{}, nil
|
||||
}
|
||||
return []string{pod.Spec.NodeName}, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// AddPodControllerUIDIndexer adds an indexer for Pod's controllerRef.UID to the given PodInformer.
|
||||
// This indexer is used to efficiently look up pods by their ControllerRef.UID
|
||||
func AddPodControllerUIDIndexer(podInformer cache.SharedIndexInformer) error {
|
||||
if _, exists := podInformer.GetIndexer().GetIndexers()[PodControllerUIDIndex]; exists {
|
||||
// indexer already exists, do nothing
|
||||
return nil
|
||||
}
|
||||
return podInformer.AddIndexers(cache.Indexers{
|
||||
PodControllerUIDIndex: func(obj interface{}) ([]string, error) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
// Get the ControllerRef of the Pod to check if it's managed by a controller
|
||||
if ref := metav1.GetControllerOf(pod); ref != nil {
|
||||
return []string{string(ref.UID)}, nil
|
||||
}
|
||||
// If the Pod has no controller (i.e., it's orphaned), index it with the OrphanPodIndexKey
|
||||
// This helps identify orphan pods for reconciliation and adoption by controllers
|
||||
return []string{OrphanPodIndexKey}, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// PodKey returns a key unique to the given pod within a cluster.
|
||||
// It's used so we consistently use the same key scheme in this module.
|
||||
// It does exactly what cache.MetaNamespaceKeyFunc would have done
|
||||
|
20
e2e/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
20
e2e/vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
@ -714,6 +714,26 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) int3
|
||||
return totalAvailableReplicas
|
||||
}
|
||||
|
||||
// GetTerminatingReplicaCountForReplicaSets returns the number of terminating pods for all replica sets
|
||||
// or returns an error if any replica sets have been synced by the controller but do not report their terminating count.
|
||||
func GetTerminatingReplicaCountForReplicaSets(replicaSets []*apps.ReplicaSet) *int32 {
|
||||
terminatingReplicas := int32(0)
|
||||
for _, rs := range replicaSets {
|
||||
switch {
|
||||
case rs == nil:
|
||||
// No-op
|
||||
case rs.Status.ObservedGeneration == 0 && rs.Status.TerminatingReplicas == nil:
|
||||
// Replicasets that have never been synced by the controller don't contribute to TerminatingReplicas
|
||||
case rs.Status.TerminatingReplicas == nil:
|
||||
// If any replicaset synced by the controller hasn't reported TerminatingReplicas, we cannot calculate a sum
|
||||
return nil
|
||||
default:
|
||||
terminatingReplicas += *rs.Status.TerminatingReplicas
|
||||
}
|
||||
}
|
||||
return &terminatingReplicas
|
||||
}
|
||||
|
||||
// IsRollingUpdate returns true if the strategy type is a rolling update.
|
||||
func IsRollingUpdate(deployment *apps.Deployment) bool {
|
||||
return deployment.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/controller/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/controller/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// Package controller contains code for controllers (like the replication
|
||||
// controller).
|
||||
package controller // import "k8s.io/kubernetes/pkg/controller"
|
||||
package controller
|
||||
|
8
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS
generated
vendored
Normal file
8
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
approvers:
|
||||
- sig-auth-authenticators-approvers
|
||||
reviewers:
|
||||
- sig-auth-authenticators-reviewers
|
||||
labels:
|
||||
- sig/auth
|
319
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go
generated
vendored
Normal file
319
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go
generated
vendored
Normal file
@ -0,0 +1,319 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package credentialprovider
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
maxReadLength = 10 * 1 << 20 // 10MB
|
||||
)
|
||||
|
||||
// DockerConfigJSON represents ~/.docker/config.json file info
|
||||
// see https://github.com/docker/docker/pull/12009
|
||||
type DockerConfigJSON struct {
|
||||
Auths DockerConfig `json:"auths"`
|
||||
// +optional
|
||||
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
||||
}
|
||||
|
||||
// DockerConfig represents the config file used by the docker CLI.
|
||||
// This config that represents the credentials that should be used
|
||||
// when pulling images from specific image repositories.
|
||||
type DockerConfig map[string]DockerConfigEntry
|
||||
|
||||
// DockerConfigEntry wraps a docker config as a entry
|
||||
type DockerConfigEntry struct {
|
||||
Username string
|
||||
Password string
|
||||
Email string
|
||||
Provider DockerConfigProvider
|
||||
}
|
||||
|
||||
var (
|
||||
preferredPathLock sync.Mutex
|
||||
preferredPath = ""
|
||||
workingDirPath = ""
|
||||
homeDirPath, _ = os.UserHomeDir()
|
||||
rootDirPath = "/"
|
||||
homeJSONDirPath = filepath.Join(homeDirPath, ".docker")
|
||||
rootJSONDirPath = filepath.Join(rootDirPath, ".docker")
|
||||
|
||||
configFileName = ".dockercfg"
|
||||
configJSONFileName = "config.json"
|
||||
)
|
||||
|
||||
// SetPreferredDockercfgPath set preferred docker config path
|
||||
func SetPreferredDockercfgPath(path string) {
|
||||
preferredPathLock.Lock()
|
||||
defer preferredPathLock.Unlock()
|
||||
preferredPath = path
|
||||
}
|
||||
|
||||
// GetPreferredDockercfgPath get preferred docker config path
|
||||
func GetPreferredDockercfgPath() string {
|
||||
preferredPathLock.Lock()
|
||||
defer preferredPathLock.Unlock()
|
||||
return preferredPath
|
||||
}
|
||||
|
||||
// DefaultDockercfgPaths returns default search paths of .dockercfg
|
||||
func DefaultDockercfgPaths() []string {
|
||||
return []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath}
|
||||
}
|
||||
|
||||
// DefaultDockerConfigJSONPaths returns default search paths of .docker/config.json
|
||||
func DefaultDockerConfigJSONPaths() []string {
|
||||
return []string{GetPreferredDockercfgPath(), workingDirPath, homeJSONDirPath, rootJSONDirPath}
|
||||
}
|
||||
|
||||
// ReadDockercfgFile attempts to read a legacy dockercfg file from the given paths.
|
||||
// if searchPaths is empty, the default paths are used.
|
||||
func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) {
|
||||
if len(searchPaths) == 0 {
|
||||
searchPaths = DefaultDockercfgPaths()
|
||||
}
|
||||
|
||||
for _, configPath := range searchPaths {
|
||||
absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName))
|
||||
if err != nil {
|
||||
klog.Errorf("while trying to canonicalize %s: %v", configPath, err)
|
||||
continue
|
||||
}
|
||||
klog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation)
|
||||
contents, err := os.ReadFile(absDockerConfigFileLocation)
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err)
|
||||
continue
|
||||
}
|
||||
cfg, err := ReadDockerConfigFileFromBytes(contents)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("couldn't get the config from %q contents: %v", absDockerConfigFileLocation, err)
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation)
|
||||
return cfg, nil
|
||||
|
||||
}
|
||||
return nil, fmt.Errorf("couldn't find valid .dockercfg after checking in %v", searchPaths)
|
||||
}
|
||||
|
||||
// ReadDockerConfigJSONFile attempts to read a docker config.json file from the given paths.
|
||||
// if searchPaths is empty, the default paths are used.
|
||||
func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error) {
|
||||
if len(searchPaths) == 0 {
|
||||
searchPaths = DefaultDockerConfigJSONPaths()
|
||||
}
|
||||
for _, configPath := range searchPaths {
|
||||
absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJSONFileName))
|
||||
if err != nil {
|
||||
klog.Errorf("while trying to canonicalize %s: %v", configPath, err)
|
||||
continue
|
||||
}
|
||||
klog.V(4).Infof("looking for %s at %s", configJSONFileName, absDockerConfigFileLocation)
|
||||
cfg, err = ReadSpecificDockerConfigJSONFile(absDockerConfigFileLocation)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
klog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
klog.V(4).Infof("found valid %s at %s", configJSONFileName, absDockerConfigFileLocation)
|
||||
return cfg, nil
|
||||
}
|
||||
return nil, fmt.Errorf("couldn't find valid %s after checking in %v", configJSONFileName, searchPaths)
|
||||
|
||||
}
|
||||
|
||||
// ReadSpecificDockerConfigJSONFile attempts to read docker configJSON from a given file path.
|
||||
func ReadSpecificDockerConfigJSONFile(filePath string) (cfg DockerConfig, err error) {
|
||||
var contents []byte
|
||||
|
||||
if contents, err = os.ReadFile(filePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return readDockerConfigJSONFileFromBytes(contents)
|
||||
}
|
||||
|
||||
// ReadDockerConfigFile read a docker config file from default path
|
||||
func ReadDockerConfigFile() (cfg DockerConfig, err error) {
|
||||
if cfg, err := ReadDockerConfigJSONFile(nil); err == nil {
|
||||
return cfg, nil
|
||||
}
|
||||
// Can't find latest config file so check for the old one
|
||||
return ReadDockercfgFile(nil)
|
||||
}
|
||||
|
||||
// HTTPError wraps a non-StatusOK error code as an error.
|
||||
type HTTPError struct {
|
||||
StatusCode int
|
||||
URL string
|
||||
}
|
||||
|
||||
// Error implements error
|
||||
func (he *HTTPError) Error() string {
|
||||
return fmt.Sprintf("http status code: %d while fetching url %s",
|
||||
he.StatusCode, he.URL)
|
||||
}
|
||||
|
||||
// ReadURL read contents from given url
|
||||
func ReadURL(url string, client *http.Client, header *http.Header) (body []byte, err error) {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if header != nil {
|
||||
req.Header = *header
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
klog.V(2).InfoS("Failed to read URL", "statusCode", resp.StatusCode, "URL", url)
|
||||
return nil, &HTTPError{
|
||||
StatusCode: resp.StatusCode,
|
||||
URL: url,
|
||||
}
|
||||
}
|
||||
|
||||
limitedReader := &io.LimitedReader{R: resp.Body, N: maxReadLength}
|
||||
contents, err := io.ReadAll(limitedReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if limitedReader.N <= 0 {
|
||||
return nil, errors.New("the read limit is reached")
|
||||
}
|
||||
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
// ReadDockerConfigFileFromBytes read a docker config file from the given bytes
|
||||
func ReadDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
|
||||
if err = json.Unmarshal(contents, &cfg); err != nil {
|
||||
return nil, errors.New("error occurred while trying to unmarshal json")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func readDockerConfigJSONFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
|
||||
var cfgJSON DockerConfigJSON
|
||||
if err = json.Unmarshal(contents, &cfgJSON); err != nil {
|
||||
return nil, errors.New("error occurred while trying to unmarshal json")
|
||||
}
|
||||
cfg = cfgJSON.Auths
|
||||
return
|
||||
}
|
||||
|
||||
// dockerConfigEntryWithAuth is used solely for deserializing the Auth field
|
||||
// into a dockerConfigEntry during JSON deserialization.
|
||||
type dockerConfigEntryWithAuth struct {
|
||||
// +optional
|
||||
Username string `json:"username,omitempty"`
|
||||
// +optional
|
||||
Password string `json:"password,omitempty"`
|
||||
// +optional
|
||||
Email string `json:"email,omitempty"`
|
||||
// +optional
|
||||
Auth string `json:"auth,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (ident *DockerConfigEntry) UnmarshalJSON(data []byte) error {
|
||||
var tmp dockerConfigEntryWithAuth
|
||||
err := json.Unmarshal(data, &tmp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ident.Username = tmp.Username
|
||||
ident.Password = tmp.Password
|
||||
ident.Email = tmp.Email
|
||||
|
||||
if len(tmp.Auth) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ident.Username, ident.Password, err = decodeDockerConfigFieldAuth(tmp.Auth)
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaler interface.
|
||||
func (ident DockerConfigEntry) MarshalJSON() ([]byte, error) {
|
||||
toEncode := dockerConfigEntryWithAuth{ident.Username, ident.Password, ident.Email, ""}
|
||||
toEncode.Auth = encodeDockerConfigFieldAuth(ident.Username, ident.Password)
|
||||
|
||||
return json.Marshal(toEncode)
|
||||
}
|
||||
|
||||
// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
|
||||
// username and a password. The format of the auth field is base64(<username>:<password>).
|
||||
func decodeDockerConfigFieldAuth(field string) (username, password string, err error) {
|
||||
|
||||
var decoded []byte
|
||||
|
||||
// StdEncoding can only decode padded string
|
||||
// RawStdEncoding can only decode unpadded string
|
||||
if strings.HasSuffix(strings.TrimSpace(field), "=") {
|
||||
// decode padded data
|
||||
decoded, err = base64.StdEncoding.DecodeString(field)
|
||||
} else {
|
||||
// decode unpadded data
|
||||
decoded, err = base64.RawStdEncoding.DecodeString(field)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
parts := strings.SplitN(string(decoded), ":", 2)
|
||||
if len(parts) != 2 {
|
||||
err = fmt.Errorf("unable to parse auth field, must be formatted as base64(username:password)")
|
||||
return
|
||||
}
|
||||
|
||||
username = parts[0]
|
||||
password = parts[1]
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func encodeDockerConfigFieldAuth(username, password string) string {
|
||||
fieldValue := username + ":" + password
|
||||
|
||||
return base64.StdEncoding.EncodeToString([]byte(fieldValue))
|
||||
}
|
@ -14,6 +14,6 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package ports defines ports used by various pieces of the kubernetes
|
||||
// infrastructure.
|
||||
package ports // import "k8s.io/kubernetes/pkg/cluster/ports"
|
||||
// Package credentialprovider supplies interfaces and implementations for
|
||||
// docker registry providers to expose their authentication scheme.
|
||||
package credentialprovider
|
367
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go
generated
vendored
Normal file
367
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go
generated
vendored
Normal file
@ -0,0 +1,367 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package credentialprovider
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
// DockerKeyring tracks a set of docker registry credentials, maintaining a
// reverse index across the registry endpoints. A registry endpoint is made
// up of a host (e.g. registry.example.com), but it may also contain a path
// (e.g. registry.example.com/foo) This index is important for two reasons:
//   - registry endpoints may overlap, and when this happens we must find the
//     most specific match for a given image
//   - iterating a map does not yield predictable results
type DockerKeyring interface {
	// Lookup returns the credentials applicable to the given image name,
	// plus a boolean indicating whether any credentials were found.
	Lookup(image string) ([]TrackedAuthConfig, bool)
}
|
||||
|
||||
// BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring
type BasicDockerKeyring struct {
	// index is the reverse-sorted list of registry keys; kept deduplicated
	// and ordered so more specific endpoints are matched first in Lookup.
	index []string
	// creds maps a registry key (host, or host+path) to the credentials
	// registered for it via Add.
	creds map[string][]TrackedAuthConfig
}
|
||||
|
||||
// providersDockerKeyring is an implementation of DockerKeyring that
// materializes its dockercfg based on a set of dockerConfigProviders.
type providersDockerKeyring struct {
	// Providers are queried in order on every Lookup; their configs are
	// merged into a fresh BasicDockerKeyring per call.
	Providers []DockerConfigProvider
}
|
||||
|
||||
// TrackedAuthConfig wraps the AuthConfig and adds information about the source
// of the credentials.
type TrackedAuthConfig struct {
	AuthConfig
	// AuthConfigHash is a SHA-256 hex digest of the embedded AuthConfig.
	// It is only populated by NewTrackedAuthConfig when the
	// KubeletEnsureSecretPulledImages feature gate is enabled.
	AuthConfigHash string

	// Source records where the credentials came from; nil for credentials
	// that are globally available on the node. Also only populated when the
	// KubeletEnsureSecretPulledImages feature gate is enabled.
	Source *CredentialSource
}
|
||||
|
||||
// NewTrackedAuthConfig initializes the TrackedAuthConfig structure by adding
|
||||
// the source information to the supplied AuthConfig. It also counts a hash of the
|
||||
// AuthConfig and keeps it in the returned structure.
|
||||
//
|
||||
// The supplied CredentialSource is only used when the "KubeletEnsureSecretPulledImages"
|
||||
// is enabled, the same applies for counting the hash.
|
||||
func NewTrackedAuthConfig(c *AuthConfig, src *CredentialSource) *TrackedAuthConfig {
|
||||
if c == nil {
|
||||
panic("cannot construct TrackedAuthConfig with a nil AuthConfig")
|
||||
}
|
||||
|
||||
authConfig := &TrackedAuthConfig{
|
||||
AuthConfig: *c,
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletEnsureSecretPulledImages) {
|
||||
authConfig.Source = src
|
||||
authConfig.AuthConfigHash = hashAuthConfig(c)
|
||||
}
|
||||
return authConfig
|
||||
}
|
||||
|
||||
// CredentialSource describes where a set of credentials originated.
// Currently the only tracked origin is a Kubernetes Secret.
type CredentialSource struct {
	// Secret identifies the Secret object the credentials were read from.
	Secret SecretCoordinates
}

// SecretCoordinates uniquely identifies a Secret object.
type SecretCoordinates struct {
	// UID of the Secret object.
	UID string
	// Namespace the Secret lives in.
	Namespace string
	// Name of the Secret.
	Name string
}
|
||||
|
||||
// AuthConfig contains authorization information for connecting to a Registry
// This type mirrors "github.com/docker/docker/api/types.AuthConfig"
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	// Auth is the base64-encoded "username:password" pair, as stored in
	// docker config files.
	Auth string `json:"auth,omitempty"`

	// Email is an optional value associated with the username.
	// This field is deprecated and will be removed in a later
	// version of docker.
	Email string `json:"email,omitempty"`

	ServerAddress string `json:"serveraddress,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry
	RegistryToken string `json:"registrytoken,omitempty"`
}
|
||||
|
||||
// Add inserts the docker config `cfg` into the basic docker keyring. It attaches
|
||||
// the `src` information that describes where the docker config `cfg` comes from.
|
||||
// `src` is nil if the docker config is globally available on the node.
|
||||
func (dk *BasicDockerKeyring) Add(src *CredentialSource, cfg DockerConfig) {
|
||||
if dk.index == nil {
|
||||
dk.index = make([]string, 0)
|
||||
dk.creds = make(map[string][]TrackedAuthConfig)
|
||||
}
|
||||
for loc, ident := range cfg {
|
||||
creds := AuthConfig{
|
||||
Username: ident.Username,
|
||||
Password: ident.Password,
|
||||
Email: ident.Email,
|
||||
}
|
||||
|
||||
value := loc
|
||||
if !strings.HasPrefix(value, "https://") && !strings.HasPrefix(value, "http://") {
|
||||
value = "https://" + value
|
||||
}
|
||||
parsed, err := url.Parse(value)
|
||||
if err != nil {
|
||||
klog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// The docker client allows exact matches:
|
||||
// foo.bar.com/namespace
|
||||
// Or hostname matches:
|
||||
// foo.bar.com
|
||||
// It also considers /v2/ and /v1/ equivalent to the hostname
|
||||
// See ResolveAuthConfig in docker/registry/auth.go.
|
||||
effectivePath := parsed.Path
|
||||
if strings.HasPrefix(effectivePath, "/v2/") || strings.HasPrefix(effectivePath, "/v1/") {
|
||||
effectivePath = effectivePath[3:]
|
||||
}
|
||||
var key string
|
||||
if (len(effectivePath) > 0) && (effectivePath != "/") {
|
||||
key = parsed.Host + effectivePath
|
||||
} else {
|
||||
key = parsed.Host
|
||||
}
|
||||
trackedCreds := NewTrackedAuthConfig(&creds, src)
|
||||
|
||||
dk.creds[key] = append(dk.creds[key], *trackedCreds)
|
||||
dk.index = append(dk.index, key)
|
||||
}
|
||||
|
||||
eliminateDupes := sets.NewString(dk.index...)
|
||||
dk.index = eliminateDupes.List()
|
||||
|
||||
// Update the index used to identify which credentials to use for a given
|
||||
// image. The index is reverse-sorted so more specific paths are matched
|
||||
// first. For example, if for the given image "gcr.io/etcd-development/etcd",
|
||||
// credentials for "quay.io/coreos" should match before "quay.io".
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(dk.index)))
|
||||
}
|
||||
|
||||
const (
	// defaultRegistryHost is the index key under which DockerHub
	// credentials are looked up when an image resolves to the default
	// registry.
	defaultRegistryHost = "index.docker.io"
	// defaultRegistryKey is the canonical dockercfg entry name for the
	// default registry.
	defaultRegistryKey = defaultRegistryHost + "/v1/"
)
|
||||
|
||||
// isDefaultRegistryMatch determines whether the given image will
// pull from the default registry (DockerHub) based on the
// characteristics of its name.
func isDefaultRegistryMatch(image string) bool {
	host, _, hasRemainder := strings.Cut(image, "/")

	switch {
	case host == "":
		// Malformed name such as "/foo".
		return false
	case !hasRemainder:
		// A bare repository name, e.g. "ubuntu", implies DockerHub.
		return true
	case host == "docker.io", host == "index.docker.io":
		// Explicit default-registry prefixes.
		return true
	}

	// From: http://blog.docker.com/2013/07/how-to-use-your-own-registry/
	// Docker looks for either a "." (domain separator) or ":" (port separator)
	// to learn that the first part of the repository name is a location and not
	// a user name.
	return !strings.ContainsAny(host, ".:")
}
|
||||
|
||||
// ParseSchemelessURL parses a schemeless url and returns a url.URL
// url.Parse require a scheme, but ours don't have schemes. Adding a
// scheme to make url.Parse happy, then clear out the resulting scheme.
func ParseSchemelessURL(schemelessURL string) (*url.URL, error) {
	// Temporarily attach a scheme so the standard parser accepts the input.
	result, err := url.Parse("https://" + schemelessURL)
	if err != nil {
		return nil, err
	}

	// The caller's URL had no scheme; remove the one we injected.
	result.Scheme = ""

	return result, nil
}
|
||||
|
||||
// SplitURL splits the host name into parts, as well as the port
|
||||
func SplitURL(url *url.URL) (parts []string, port string) {
|
||||
host, port, err := net.SplitHostPort(url.Host)
|
||||
if err != nil {
|
||||
// could not parse port
|
||||
host, port = url.Host, ""
|
||||
}
|
||||
return strings.Split(host, "."), port
|
||||
}
|
||||
|
||||
// URLsMatchStr is wrapper for URLsMatch, operating on strings instead of URLs.
|
||||
func URLsMatchStr(glob string, target string) (bool, error) {
|
||||
globURL, err := ParseSchemelessURL(glob)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
targetURL, err := ParseSchemelessURL(target)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return URLsMatch(globURL, targetURL)
|
||||
}
|
||||
|
||||
// URLsMatch checks whether the given target url matches the glob url, which may have
|
||||
// glob wild cards in the host name.
|
||||
//
|
||||
// Examples:
|
||||
//
|
||||
// globURL=*.docker.io, targetURL=blah.docker.io => match
|
||||
// globURL=*.docker.io, targetURL=not.right.io => no match
|
||||
//
|
||||
// Note that we don't support wildcards in ports and paths yet.
|
||||
func URLsMatch(globURL *url.URL, targetURL *url.URL) (bool, error) {
|
||||
globURLParts, globPort := SplitURL(globURL)
|
||||
targetURLParts, targetPort := SplitURL(targetURL)
|
||||
if globPort != targetPort {
|
||||
// port doesn't match
|
||||
return false, nil
|
||||
}
|
||||
if len(globURLParts) != len(targetURLParts) {
|
||||
// host name does not have the same number of parts
|
||||
return false, nil
|
||||
}
|
||||
if !strings.HasPrefix(targetURL.Path, globURL.Path) {
|
||||
// the path of the credential must be a prefix
|
||||
return false, nil
|
||||
}
|
||||
for k, globURLPart := range globURLParts {
|
||||
targetURLPart := targetURLParts[k]
|
||||
matched, err := filepath.Match(globURLPart, targetURLPart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
// glob mismatch for some part
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// everything matches
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Lookup implements the DockerKeyring method for fetching credentials based on image name.
|
||||
// Multiple credentials may be returned if there are multiple potentially valid credentials
|
||||
// available. This allows for rotation.
|
||||
func (dk *BasicDockerKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
|
||||
// range over the index as iterating over a map does not provide a predictable ordering
|
||||
ret := []TrackedAuthConfig{}
|
||||
for _, k := range dk.index {
|
||||
// both k and image are schemeless URLs because even though schemes are allowed
|
||||
// in the credential configurations, we remove them in Add.
|
||||
if matched, _ := URLsMatchStr(k, image); matched {
|
||||
ret = append(ret, dk.creds[k]...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(ret) > 0 {
|
||||
return ret, true
|
||||
}
|
||||
|
||||
// Use credentials for the default registry if provided, and appropriate
|
||||
if isDefaultRegistryMatch(image) {
|
||||
if auth, ok := dk.creds[defaultRegistryHost]; ok {
|
||||
return auth, true
|
||||
}
|
||||
}
|
||||
|
||||
return []TrackedAuthConfig{}, false
|
||||
}
|
||||
|
||||
// Lookup implements the DockerKeyring method for fetching credentials
|
||||
// based on image name.
|
||||
func (dk *providersDockerKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
|
||||
keyring := &BasicDockerKeyring{}
|
||||
|
||||
for _, p := range dk.Providers {
|
||||
// TODO: the source should probably change once we depend on service accounts (KEP-4412).
|
||||
// Perhaps `Provide()` should return the source modified to accommodate this?
|
||||
keyring.Add(nil, p.Provide(image))
|
||||
}
|
||||
|
||||
return keyring.Lookup(image)
|
||||
}
|
||||
|
||||
// FakeKeyring a fake config credentials keyring for testing; it returns a
// fixed answer regardless of the image looked up.
type FakeKeyring struct {
	// auth is the canned credential list returned from every Lookup.
	auth []TrackedAuthConfig
	// ok is the canned "found" flag returned from every Lookup.
	ok bool
}

// Lookup implements the DockerKeyring method for fetching credentials based on image name
// return fake auth and ok
func (f *FakeKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
	return f.auth, f.ok
}
|
||||
|
||||
// UnionDockerKeyring delegates to a set of keyrings.
|
||||
type UnionDockerKeyring []DockerKeyring
|
||||
|
||||
// Lookup implements the DockerKeyring method for fetching credentials based on image name.
|
||||
// return each credentials
|
||||
func (k UnionDockerKeyring) Lookup(image string) ([]TrackedAuthConfig, bool) {
|
||||
authConfigs := []TrackedAuthConfig{}
|
||||
for _, subKeyring := range k {
|
||||
if subKeyring == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
currAuthResults, _ := subKeyring.Lookup(image)
|
||||
authConfigs = append(authConfigs, currAuthResults...)
|
||||
}
|
||||
|
||||
return authConfigs, (len(authConfigs) > 0)
|
||||
}
|
||||
|
||||
func hashAuthConfig(creds *AuthConfig) string {
|
||||
credBytes, err := json.Marshal(creds)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
hash := sha256.New()
|
||||
hash.Write([]byte(credBytes))
|
||||
return hex.EncodeToString(hash.Sum(nil))
|
||||
}
|
113
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go
generated
vendored
Normal file
113
e2e/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package credentialprovider
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// DockerConfigProvider is the interface that registered extensions implement
// to materialize 'dockercfg' credentials.
type DockerConfigProvider interface {
	// Enabled returns true if the config provider is enabled.
	// Implementations can be blocking - e.g. metadata server unavailable.
	Enabled() bool
	// Provide returns docker configuration.
	// Implementations can be blocking - e.g. metadata server unavailable.
	// The image is passed in as context in the event that the
	// implementation depends on information in the image name to return
	// credentials; implementations are safe to ignore the image.
	Provide(image string) DockerConfig
}
|
||||
|
||||
// A DockerConfigProvider that simply reads the .dockercfg file
type defaultDockerConfigProvider struct{}
|
||||
|
||||
// CachingDockerConfigProvider implements DockerConfigProvider by composing
// with another DockerConfigProvider and caching the DockerConfig it provides
// for a pre-specified lifetime.
type CachingDockerConfigProvider struct {
	// Provider is the wrapped source of docker configs.
	Provider DockerConfigProvider
	// Lifetime is how long a cached config remains valid after it is stored.
	Lifetime time.Duration

	// ShouldCache is an optional function that returns true if the specific config should be cached.
	// If nil, all configs are treated as cacheable.
	ShouldCache func(DockerConfig) bool

	// cache fields
	cacheDockerConfig DockerConfig
	expiration        time.Time
	mu                sync.Mutex
}
|
||||
|
||||
// Enabled implements DockerConfigProvider; the default on-disk provider is
// always available.
func (d *defaultDockerConfigProvider) Enabled() bool {
	return true
}
|
||||
|
||||
// Provide implements dockerConfigProvider
|
||||
func (d *defaultDockerConfigProvider) Provide(image string) DockerConfig {
|
||||
// Read the standard Docker credentials from .dockercfg
|
||||
if cfg, err := ReadDockerConfigFile(); err == nil {
|
||||
return cfg
|
||||
} else if !os.IsNotExist(err) {
|
||||
klog.V(2).Infof("Docker config file not found: %v", err)
|
||||
}
|
||||
return DockerConfig{}
|
||||
}
|
||||
|
||||
// Enabled implements DockerConfigProvider by delegating to the wrapped
// provider.
func (d *CachingDockerConfigProvider) Enabled() bool {
	return d.Provider.Enabled()
}
|
||||
|
||||
// Provide implements dockerConfigProvider
|
||||
func (d *CachingDockerConfigProvider) Provide(image string) DockerConfig {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
// If the cache hasn't expired, return our cache
|
||||
if time.Now().Before(d.expiration) {
|
||||
return d.cacheDockerConfig
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String())
|
||||
config := d.Provider.Provide(image)
|
||||
if d.ShouldCache == nil || d.ShouldCache(config) {
|
||||
d.cacheDockerConfig = config
|
||||
d.expiration = time.Now().Add(d.Lifetime)
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
// NewDefaultDockerKeyring creates a DockerKeyring to use for resolving credentials,
|
||||
// which returns the default credentials from the .dockercfg file.
|
||||
func NewDefaultDockerKeyring() DockerKeyring {
|
||||
return &providersDockerKeyring{
|
||||
Providers: []DockerConfigProvider{
|
||||
&CachingDockerConfigProvider{
|
||||
Provider: &defaultDockerConfigProvider{},
|
||||
Lifetime: 5 * time.Minute,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
2
e2e/vendor/k8s.io/kubernetes/pkg/features/client_adapter.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/features/client_adapter.go
generated
vendored
@ -65,7 +65,7 @@ func (a *clientAdapter) Add(in map[clientfeatures.Feature]clientfeatures.Feature
|
||||
}
|
||||
out[featuregate.Feature(name)] = converted
|
||||
}
|
||||
return a.mfg.Add(out)
|
||||
return a.mfg.Add(out) //nolint:forbidigo // No need to support versioned feature gates in client adapter
|
||||
}
|
||||
|
||||
// Set implements the unexported interface that client-go feature gate testing expects for
|
||||
|
1170
e2e/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
generated
vendored
1170
e2e/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
generated
vendored
File diff suppressed because it is too large
Load Diff
841
e2e/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go
generated
vendored
841
e2e/vendor/k8s.io/kubernetes/pkg/features/versioned_kube_features.go
generated
vendored
@ -1,841 +0,0 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package features
|
||||
|
||||
import (
|
||||
apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
|
||||
"k8s.io/apimachinery/pkg/util/version"
|
||||
genericfeatures "k8s.io/apiserver/pkg/features"
|
||||
"k8s.io/component-base/featuregate"
|
||||
zpagesfeatures "k8s.io/component-base/zpages/features"
|
||||
kcmfeatures "k8s.io/controller-manager/pkg/features"
|
||||
)
|
||||
|
||||
// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
|
||||
// To add a new feature, define a key for it in pkg/features/kube_features.go and add it here. The features will be
|
||||
// available throughout Kubernetes binaries.
|
||||
// For features available via specific kubernetes components like apiserver,
|
||||
// cloud-controller-manager, etc find the respective kube_features.go file
|
||||
// (eg:staging/src/apiserver/pkg/features/kube_features.go), define the versioned
|
||||
// feature gate there, and reference it in this file.
|
||||
// To support n-3 compatibility version, features may only be removed 3 releases after graduation.
|
||||
//
|
||||
// Entries are alphabetized.
|
||||
var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
|
||||
AllowDNSOnlyNodeCSR: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
},
|
||||
|
||||
AllowInsecureKubeletCertificateSigningRequests: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
},
|
||||
|
||||
AllowOverwriteTerminationGracePeriodSeconds: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
},
|
||||
|
||||
AllowServiceLBStatusOnNonLB: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated, LockToDefault: true}, // remove in 1.35
|
||||
},
|
||||
|
||||
AnyVolumeDataSource: {
|
||||
{Version: version.MustParse("1.18"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
AppArmor: {
|
||||
{Version: version.MustParse("1.4"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
AppArmorFields: {
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
AuthorizeNodeWithSelectors: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
kcmfeatures.CloudControllerManagerWebhook: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
ClusterTrustBundle: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
ClusterTrustBundleProjection: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
ContainerCheckpoint: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
CPUCFSQuotaPeriod: {
|
||||
{Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
CPUManager: {
|
||||
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.10"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26
|
||||
},
|
||||
|
||||
CPUManagerPolicyAlphaOptions: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
CPUManagerPolicyBetaOptions: {
|
||||
{Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
CPUManagerPolicyOptions: {
|
||||
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.23"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
CronJobsScheduledAnnotation: {
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35
|
||||
},
|
||||
|
||||
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
|
||||
// unintentionally on either side:
|
||||
apiextensionsfeatures.CRDValidationRatcheting: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
CrossNamespaceVolumeDataSource: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
CSIMigrationPortworx: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta}, // On by default (requires Portworx CSI driver)
|
||||
},
|
||||
|
||||
CSIVolumeHealth: {
|
||||
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
|
||||
// unintentionally on either side:
|
||||
apiextensionsfeatures.CustomResourceFieldSelectors: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
|
||||
},
|
||||
|
||||
DevicePluginCDIDevices: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
DisableAllocatorDualWrite: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha}, // remove after MultiCIDRServiceAllocator is GA
|
||||
},
|
||||
|
||||
DisableCloudProviders: {
|
||||
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
DisableKubeletCloudCredentialProviders: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
DisableNodeKubeProxyVersion: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
},
|
||||
|
||||
DRAAdminAccess: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
DynamicResourceAllocation: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
DRAResourceClaimDeviceStatus: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
KubeletCrashLoopBackOffMax: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
ElasticIndexedJob: {
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.32
|
||||
},
|
||||
|
||||
EventedPLEG: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
ExecProbeTimeout: {
|
||||
{Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
|
||||
},
|
||||
|
||||
ExternalServiceAccountTokenSigner: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.AdmissionWebhookMatchConditions: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.AggregatedDiscoveryEndpoint: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.AllowUnsafeMalformedObjectDeletion: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.AnonymousAuthConfigurableEndpoints: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.APIListChunking: {
|
||||
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.9"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.APIResponseCompression: {
|
||||
{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.APIServerIdentity: {
|
||||
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.APIServerTracing: {
|
||||
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.APIServingWithRoutine: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.AuthorizeWithSelectors: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.BtreeWatchCache: {
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.CBORServingAndStorage: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.ConcurrentWatchObjectDecode: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.ConsistentListFromCache: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.CoordinatedLeaderElection: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.EfficientWatchResumption: {
|
||||
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.KMSv1: {
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated},
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
},
|
||||
|
||||
genericfeatures.MutatingAdmissionPolicy: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.OpenAPIEnums: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.RemainingItemCount: {
|
||||
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.RemoteRequestHeaderUID: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.ResilientWatchCacheInitialization: {
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.RetryGenerateName: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
|
||||
},
|
||||
|
||||
genericfeatures.SeparateCacheWatchRPC: {
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.StorageVersionAPI: {
|
||||
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
genericfeatures.StorageVersionHash: {
|
||||
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.StrictCostEnforcementForVAP: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.StrictCostEnforcementForWebhooks: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.StructuredAuthenticationConfiguration: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.StructuredAuthorizationConfiguration: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.UnauthenticatedHTTP2DOSMitigation: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.WatchBookmark: {
|
||||
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.17"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
genericfeatures.WatchCacheInitializationPostStartHook: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.WatchFromStorageWithoutResourceVersion: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
genericfeatures.WatchList: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
GracefulNodeShutdown: {
|
||||
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
GracefulNodeShutdownBasedOnPodPriority: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
HonorPVReclaimPolicy: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
HPAScaleToZero: {
|
||||
{Version: version.MustParse("1.16"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
ImageMaximumGCAge: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ImageVolume: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
InPlacePodVerticalScaling: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
InPlacePodVerticalScalingAllocatedStatus: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
InPlacePodVerticalScalingExclusiveCPUs: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
InTreePluginPortworxUnregister: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
JobBackoffLimitPerIndex: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
JobManagedBy: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
JobPodFailurePolicy: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
JobPodReplacementPolicy: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
JobSuccessPolicy: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
KubeletCgroupDriverFromCRI: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
KubeletFineGrainedAuthz: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
KubeletInUserNamespace: {
|
||||
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
KubeletPodResourcesDynamicResources: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
KubeletPodResourcesGet: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
KubeletRegistrationGetOnExistsOnly: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Deprecated},
|
||||
},
|
||||
|
||||
KubeletSeparateDiskGC: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
KubeletTracing: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
KubeProxyDrainingTerminatingNodes: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31; remove in 1.33
|
||||
},
|
||||
|
||||
LoadBalancerIPMode: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
LocalStorageCapacityIsolationFSQuotaMonitoring: {
|
||||
{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
LogarithmicScaleDown: {
|
||||
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
MatchLabelKeysInPodAffinity: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
MatchLabelKeysInPodTopologySpread: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
MaxUnavailableStatefulSet: {
|
||||
{Version: version.MustParse("1.24"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
MemoryManager: {
|
||||
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
|
||||
},
|
||||
|
||||
MemoryQoS: {
|
||||
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
MultiCIDRServiceAllocator: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
NFTablesProxyMode: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
NodeInclusionPolicyInPodTopologySpread: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
NodeLogQuery: {
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
NodeSwap: {
|
||||
{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
OrderedNamespaceDeletion: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
PDBUnhealthyPodEvictionPolicy: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
PersistentVolumeLastPhaseTransitionTime: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
PodAndContainerStatsFromCRI: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
PodDeletionCost: {
|
||||
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
PodDisruptionConditions: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
|
||||
},
|
||||
|
||||
PodIndexLabel: {
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.35
|
||||
},
|
||||
|
||||
PodLevelResources: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
PodLifecycleSleepAction: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
PodReadyToStartContainersCondition: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
PodLifecycleSleepActionAllowZero: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
PodSchedulingReadiness: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32
|
||||
},
|
||||
|
||||
PortForwardWebsockets: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ProcMountType: {
|
||||
{Version: version.MustParse("1.12"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
QOSReserved: {
|
||||
{Version: version.MustParse("1.11"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
RecoverVolumeExpansionFailure: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
RecursiveReadOnlyMounts: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
RelaxedDNSSearchValidation: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
RelaxedEnvironmentVariableValidation: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ReloadKubeletServerCertificateFile: {
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ResourceHealthStatus: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
RotateKubeletServerCertificate: {
|
||||
{Version: version.MustParse("1.7"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.12"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
RuntimeClassInImageCriAPI: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
SchedulerAsyncPreemption: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
SchedulerQueueingHints: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
SELinuxChangePolicy: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
SELinuxMount: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
SELinuxMountReadWriteOncePod: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
SeparateTaintEvictionController: {
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
StorageNamespaceIndex: {
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ServiceAccountNodeAudienceRestriction: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ServiceAccountTokenJTI: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34
|
||||
},
|
||||
|
||||
ServiceAccountTokenNodeBinding: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
ServiceAccountTokenNodeBindingValidation: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34
|
||||
},
|
||||
|
||||
ServiceAccountTokenPodNodeInfo: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.34
|
||||
},
|
||||
|
||||
ServiceTrafficDistribution: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
SidecarContainers: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
SizeMemoryBackedVolumes: {
|
||||
{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
|
||||
},
|
||||
|
||||
PodLogsQuerySplitStreams: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
StatefulSetAutoDeletePVC: {
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.32, remove in 1.35
|
||||
},
|
||||
|
||||
StatefulSetStartOrdinal: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33
|
||||
},
|
||||
|
||||
StorageVersionMigrator: {
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
SupplementalGroupsPolicy: {
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
SystemdWatchdog: {
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
TopologyAwareHints: {
|
||||
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
TopologyManagerPolicyAlphaOptions: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
TopologyManagerPolicyBetaOptions: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
TopologyManagerPolicyOptions: {
|
||||
{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
|
||||
{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA},
|
||||
},
|
||||
|
||||
TranslateStreamCloseWebsocketRequests: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
UnknownVersionInteroperabilityProxy: {
|
||||
{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
UserNamespacesPodSecurityStandards: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
UserNamespacesSupport: {
|
||||
{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
VolumeAttributesClass: {
|
||||
{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
VolumeCapacityPriority: {
|
||||
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
WinDSR: {
|
||||
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
WindowsGracefulNodeShutdown: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
WinOverlay: {
|
||||
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
|
||||
{Version: version.MustParse("1.20"), Default: true, PreRelease: featuregate.Beta},
|
||||
},
|
||||
|
||||
WindowsCPUAndMemoryAffinity: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
WindowsHostNetwork: {
|
||||
{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
zpagesfeatures.ComponentFlagz: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
|
||||
zpagesfeatures.ComponentStatusz: {
|
||||
{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
|
||||
},
|
||||
}
|
2
e2e/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// Package fieldpath supplies methods for extracting fields from objects
|
||||
// given a path to a field.
|
||||
package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath"
|
||||
package fieldpath
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go
generated
vendored
@ -17,4 +17,4 @@ limitations under the License.
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +groupName=kubelet.config.k8s.io
|
||||
|
||||
package config // import "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
package config
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go
generated
vendored
@ -40,6 +40,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
&KubeletConfiguration{},
|
||||
&SerializedNodeConfigSource{},
|
||||
&CredentialProviderConfig{},
|
||||
&ImagePullIntent{},
|
||||
&ImagePulledRecord{},
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
200
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
200
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
@ -155,6 +155,25 @@ type KubeletConfiguration struct {
|
||||
// pulls to burst to this number, while still not exceeding registryPullQPS.
|
||||
// Only used if registryPullQPS > 0.
|
||||
RegistryBurst int32
|
||||
// imagePullCredentialsVerificationPolicy determines how credentials should be
|
||||
// verified when pod requests an image that is already present on the node:
|
||||
// - NeverVerify
|
||||
// - anyone on a node can use any image present on the node
|
||||
// - NeverVerifyPreloadedImages
|
||||
// - images that were pulled to the node by something else than the kubelet
|
||||
// can be used without reverifying pull credentials
|
||||
// - NeverVerifyAllowlistedImages
|
||||
// - like "NeverVerifyPreloadedImages" but only node images from
|
||||
// `preloadedImagesVerificationAllowlist` don't require reverification
|
||||
// - AlwaysVerify
|
||||
// - all images require credential reverification
|
||||
ImagePullCredentialsVerificationPolicy string
|
||||
// preloadedImagesVerificationAllowlist specifies a list of images that are
|
||||
// exempted from credential reverification for the "NeverVerifyAllowlistedImages"
|
||||
// `imagePullCredentialsVerificationPolicy`.
|
||||
// The list accepts a full path segment wildcard suffix "/*".
|
||||
// Only use image specs without an image tag or digest.
|
||||
PreloadedImagesVerificationAllowlist []string
|
||||
// eventRecordQPS is the maximum event creations per second. If 0, there
|
||||
// is no limit enforced.
|
||||
EventRecordQPS int32
|
||||
@ -234,14 +253,11 @@ type KubeletConfiguration struct {
|
||||
// a group. It means that if true, the behavior aligns with the behavior of cgroups v1.
|
||||
SingleProcessOOMKill *bool
|
||||
// CPUManagerPolicy is the name of the policy to use.
|
||||
// Requires the CPUManager feature gate to be enabled.
|
||||
CPUManagerPolicy string
|
||||
// CPUManagerPolicyOptions is a set of key=value which allows to set extra options
|
||||
// to fine tune the behaviour of the cpu manager policies.
|
||||
// Requires both the "CPUManager" and "CPUManagerPolicyOptions" feature gates to be enabled.
|
||||
CPUManagerPolicyOptions map[string]string
|
||||
// CPU Manager reconciliation period.
|
||||
// Requires the CPUManager feature gate to be enabled.
|
||||
CPUManagerReconcilePeriod metav1.Duration
|
||||
// MemoryManagerPolicy is the name of the policy to use.
|
||||
// Requires the MemoryManager feature gate to be enabled.
|
||||
@ -322,6 +338,14 @@ type KubeletConfiguration struct {
|
||||
// amount of a given resource the kubelet will reclaim when performing a pod eviction while
|
||||
// that resource is under pressure. For example: {"imagefs.available": "2Gi"}
|
||||
EvictionMinimumReclaim map[string]string
|
||||
// mergeDefaultEvictionSettings indicates that defaults for the evictionHard, evictionSoft, evictionSoftGracePeriod, and evictionMinimumReclaim
|
||||
// fields should be merged into values specified for those fields in this configuration.
|
||||
// Signals specified in this configuration take precedence.
|
||||
// Signals not specified in this configuration inherit their defaults.
|
||||
// If false, and if any signal is specified in this configuration then other signals that
|
||||
// are not specified in this configuration will be set to 0.
|
||||
// It applies to merging the fields for which the default exists, and currently only evictionHard has default values.
|
||||
MergeDefaultEvictionSettings bool
|
||||
// podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods.
|
||||
// If 0, this field is ignored.
|
||||
PodsPerCore int32
|
||||
@ -514,6 +538,11 @@ type KubeletConfiguration struct {
|
||||
// +featureGate=KubeletCrashLoopBackoffMax
|
||||
// +optional
|
||||
CrashLoopBackOff CrashLoopBackOffConfig
|
||||
|
||||
// UserNamespaces contains User Namespace configurations.
|
||||
// +featureGate=UserNamespaceSupport
|
||||
// +optional
|
||||
UserNamespaces *UserNamespaces
|
||||
}
|
||||
|
||||
// KubeletAuthorizationMode denotes the authorization mode for the kubelet
|
||||
@ -604,7 +633,7 @@ type CredentialProviderConfig struct {
|
||||
// Multiple providers may match against a single image, in which case credentials
|
||||
// from all providers will be returned to the kubelet. If multiple providers are called
|
||||
// for a single image, the results are combined. If providers return overlapping
|
||||
// auth keys, the value from the provider earlier in this list is used.
|
||||
// auth keys, the value from the provider earlier in this list is attempted first.
|
||||
Providers []CredentialProvider
|
||||
}
|
||||
|
||||
@ -614,6 +643,7 @@ type CredentialProvider struct {
|
||||
// name is the required name of the credential provider. It must match the name of the
|
||||
// provider executable as seen by the kubelet. The executable must be in the kubelet's
|
||||
// bin directory (set by the --credential-provider-bin-dir flag).
|
||||
// Required to be unique across all providers.
|
||||
Name string
|
||||
|
||||
// matchImages is a required list of strings used to match against images in order to
|
||||
@ -661,6 +691,64 @@ type CredentialProvider struct {
|
||||
// to pass argument to the plugin.
|
||||
// +optional
|
||||
Env []ExecEnvVar
|
||||
|
||||
// tokenAttributes is the configuration for the service account token that will be passed to the plugin.
|
||||
// The credential provider opts in to using service account tokens for image pull by setting this field.
|
||||
// When this field is set, kubelet will generate a service account token bound to the pod for which the
|
||||
// image is being pulled and pass to the plugin as part of CredentialProviderRequest along with other
|
||||
// attributes required by the plugin.
|
||||
//
|
||||
// The service account metadata and token attributes will be used as a dimension to cache
|
||||
// the credentials in kubelet. The cache key is generated by combining the service account metadata
|
||||
// (namespace, name, UID, and annotations key+value for the keys defined in
|
||||
// serviceAccountTokenAttribute.requiredServiceAccountAnnotationKeys and serviceAccountTokenAttribute.optionalServiceAccountAnnotationKeys).
|
||||
// The pod metadata (namespace, name, UID) that are in the service account token are not used as a dimension
|
||||
// to cache the credentials in kubelet. This means workloads that are using the same service account
|
||||
// could end up using the same credentials for image pull. For plugins that don't want this behavior, or
|
||||
// plugins that operate in pass-through mode; i.e., they return the service account token as-is, they
|
||||
// can set the credentialProviderResponse.cacheDuration to 0. This will disable the caching of
|
||||
// credentials in kubelet and the plugin will be invoked for every image pull. This does result in
|
||||
// token generation overhead for every image pull, but it is the only way to ensure that the
|
||||
// credentials are not shared across pods (even if they are using the same service account).
|
||||
// +optional
|
||||
TokenAttributes *ServiceAccountTokenAttributes
|
||||
}
|
||||
|
||||
// ServiceAccountTokenAttributes is the configuration for the service account token that will be passed to the plugin.
|
||||
type ServiceAccountTokenAttributes struct {
|
||||
// serviceAccountTokenAudience is the intended audience for the projected service account token.
|
||||
// +required
|
||||
ServiceAccountTokenAudience string
|
||||
|
||||
// requireServiceAccount indicates whether the plugin requires the pod to have a service account.
|
||||
// If set to true, kubelet will only invoke the plugin if the pod has a service account.
|
||||
// If set to false, kubelet will invoke the plugin even if the pod does not have a service account
|
||||
// and will not include a token in the CredentialProviderRequest in that scenario. This is useful for plugins that
|
||||
// are used to pull images for pods without service accounts (e.g., static pods).
|
||||
// +required
|
||||
RequireServiceAccount *bool
|
||||
|
||||
// requiredServiceAccountAnnotationKeys is the list of annotation keys that the plugin is interested in
|
||||
// and that are required to be present in the service account.
|
||||
// The keys defined in this list will be extracted from the corresponding service account and passed
|
||||
// to the plugin as part of the CredentialProviderRequest. If any of the keys defined in this list
|
||||
// are not present in the service account, kubelet will not invoke the plugin and will return an error.
|
||||
// This field is optional and may be empty. Plugins may use this field to extract
|
||||
// additional information required to fetch credentials or allow workloads to opt in to
|
||||
// using service account tokens for image pull.
|
||||
// If non-empty, requireServiceAccount must be set to true.
|
||||
// +optional
|
||||
RequiredServiceAccountAnnotationKeys []string
|
||||
|
||||
// optionalServiceAccountAnnotationKeys is the list of annotation keys that the plugin is interested in
|
||||
// and that are optional to be present in the service account.
|
||||
// The keys defined in this list will be extracted from the corresponding service account and passed
|
||||
// to the plugin as part of the CredentialProviderRequest. The plugin is responsible for validating
|
||||
// the existence of annotations and their values.
|
||||
// This field is optional and may be empty. Plugins may use this field to extract
|
||||
// additional information required to fetch credentials.
|
||||
// +optional
|
||||
OptionalServiceAccountAnnotationKeys []string
|
||||
}
|
||||
|
||||
// ExecEnvVar is used for setting environment variables when executing an exec-based
|
||||
@ -702,3 +790,107 @@ type CrashLoopBackOffConfig struct {
|
||||
// +optional
|
||||
MaxContainerRestartPeriod *metav1.Duration
|
||||
}
|
||||
|
||||
// ImagePullCredentialsVerificationPolicy is an enum for the policy that is enforced
|
||||
// when pod is requesting an image that appears on the system
|
||||
type ImagePullCredentialsVerificationPolicy string
|
||||
|
||||
const (
|
||||
// NeverVerify will never require credential verification for images that
|
||||
// already exist on the node
|
||||
NeverVerify ImagePullCredentialsVerificationPolicy = "NeverVerify"
|
||||
// NeverVerifyPreloadedImages does not require credential verification for images
|
||||
// pulled outside the kubelet process
|
||||
NeverVerifyPreloadedImages ImagePullCredentialsVerificationPolicy = "NeverVerifyPreloadedImages"
|
||||
// NeverVerifyAllowlistedImages does not require credential verification for
|
||||
// a list of images that were pulled outside the kubelet process
|
||||
NeverVerifyAllowlistedImages ImagePullCredentialsVerificationPolicy = "NeverVerifyAllowlistedImages"
|
||||
// AlwaysVerify requires credential verification for accessing any image on the
|
||||
// node irregardless how it was pulled
|
||||
AlwaysVerify ImagePullCredentialsVerificationPolicy = "AlwaysVerify"
|
||||
)
|
||||
|
||||
// ImagePullIntent is a record of the kubelet attempting to pull an image.
|
||||
//
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type ImagePullIntent struct {
|
||||
metav1.TypeMeta
|
||||
|
||||
// Image is the image spec from a Container's `image` field.
|
||||
// The filename is a SHA-256 hash of this value. This is to avoid filename-unsafe
|
||||
// characters like ':' and '/'.
|
||||
Image string
|
||||
}
|
||||
|
||||
// ImagePullRecord is a record of an image that was pulled by the kubelet.
|
||||
//
|
||||
// If there are no records in the `kubernetesSecrets` field and both `nodeWideCredentials`
|
||||
// and `anonymous` are `false`, credentials must be re-checked the next time an
|
||||
// image represented by this record is being requested.
|
||||
//
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type ImagePulledRecord struct {
|
||||
metav1.TypeMeta
|
||||
|
||||
// LastUpdatedTime is the time of the last update to this record
|
||||
LastUpdatedTime metav1.Time
|
||||
|
||||
// ImageRef is a reference to the image represented by this file as received
|
||||
// from the CRI.
|
||||
// The filename is a SHA-256 hash of this value. This is to avoid filename-unsafe
|
||||
// characters like ':' and '/'.
|
||||
ImageRef string
|
||||
|
||||
// CredentialMapping maps `image` to the set of credentials that it was
|
||||
// previously pulled with.
|
||||
// `image` in this case is the content of a pod's container `image` field that's
|
||||
// got its tag/digest removed.
|
||||
//
|
||||
// Example:
|
||||
// Container requests the `hello-world:latest@sha256:91fb4b041da273d5a3273b6d587d62d518300a6ad268b28628f74997b93171b2` image:
|
||||
// "credentialMapping": {
|
||||
// "hello-world": { "nodePodsAccessible": true }
|
||||
// }
|
||||
CredentialMapping map[string]ImagePullCredentials
|
||||
}
|
||||
|
||||
// ImagePullCredentials describe credentials that can be used to pull an image.
|
||||
type ImagePullCredentials struct {
|
||||
// KuberneteSecretCoordinates is an index of coordinates of all the kubernetes
|
||||
// secrets that were used to pull the image.
|
||||
// +optional
|
||||
KubernetesSecrets []ImagePullSecret
|
||||
|
||||
// NodePodsAccessible is a flag denoting the pull credentials are accessible
|
||||
// by all the pods on the node, or that no credentials are needed for the pull.
|
||||
//
|
||||
// If true, it is mutually exclusive with the `kubernetesSecrets` field.
|
||||
// +optional
|
||||
NodePodsAccessible bool
|
||||
}
|
||||
|
||||
// ImagePullSecret is a representation of a Kubernetes secret object coordinates along
|
||||
// with a credential hash of the pull secret credentials this object contains.
|
||||
type ImagePullSecret struct {
|
||||
UID string
|
||||
Namespace string
|
||||
Name string
|
||||
|
||||
// CredentialHash is a SHA-256 retrieved by hashing the image pull credentials
|
||||
// content of the secret specified by the UID/Namespace/Name coordinates.
|
||||
CredentialHash string
|
||||
}
|
||||
|
||||
// UserNamespaces contains User Namespace configurations.
|
||||
type UserNamespaces struct {
|
||||
// IDsPerPod is the mapping length of UIDs and GIDs.
|
||||
// The length must be a multiple of 65536, and must be less than 1<<32.
|
||||
// On non-linux such as windows, only null / absent is allowed.
|
||||
//
|
||||
// Changing the value may require recreating all containers on the node.
|
||||
//
|
||||
// Default: 65536
|
||||
// +featureGate=UserNamespaceSupport
|
||||
// +optional
|
||||
IDsPerPod *int64
|
||||
}
|
||||
|
162
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
162
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
@ -72,6 +72,11 @@ func (in *CredentialProvider) DeepCopyInto(out *CredentialProvider) {
|
||||
*out = make([]ExecEnvVar, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.TokenAttributes != nil {
|
||||
in, out := &in.TokenAttributes, &out.TokenAttributes
|
||||
*out = new(ServiceAccountTokenAttributes)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -133,6 +138,101 @@ func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImagePullCredentials) DeepCopyInto(out *ImagePullCredentials) {
|
||||
*out = *in
|
||||
if in.KubernetesSecrets != nil {
|
||||
in, out := &in.KubernetesSecrets, &out.KubernetesSecrets
|
||||
*out = make([]ImagePullSecret, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullCredentials.
|
||||
func (in *ImagePullCredentials) DeepCopy() *ImagePullCredentials {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ImagePullCredentials)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImagePullIntent) DeepCopyInto(out *ImagePullIntent) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullIntent.
|
||||
func (in *ImagePullIntent) DeepCopy() *ImagePullIntent {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ImagePullIntent)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ImagePullIntent) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImagePullSecret) DeepCopyInto(out *ImagePullSecret) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePullSecret.
|
||||
func (in *ImagePullSecret) DeepCopy() *ImagePullSecret {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ImagePullSecret)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImagePulledRecord) DeepCopyInto(out *ImagePulledRecord) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.LastUpdatedTime.DeepCopyInto(&out.LastUpdatedTime)
|
||||
if in.CredentialMapping != nil {
|
||||
in, out := &in.CredentialMapping, &out.CredentialMapping
|
||||
*out = make(map[string]ImagePullCredentials, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePulledRecord.
|
||||
func (in *ImagePulledRecord) DeepCopy() *ImagePulledRecord {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ImagePulledRecord)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ImagePulledRecord) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) {
|
||||
*out = *in
|
||||
@ -214,6 +314,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
|
||||
}
|
||||
out.Authentication = in.Authentication
|
||||
out.Authorization = in.Authorization
|
||||
if in.PreloadedImagesVerificationAllowlist != nil {
|
||||
in, out := &in.PreloadedImagesVerificationAllowlist, &out.PreloadedImagesVerificationAllowlist
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ClusterDNS != nil {
|
||||
in, out := &in.ClusterDNS, &out.ClusterDNS
|
||||
*out = make([]string, len(*in))
|
||||
@ -354,6 +459,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.CrashLoopBackOff.DeepCopyInto(&out.CrashLoopBackOff)
|
||||
if in.UserNamespaces != nil {
|
||||
in, out := &in.UserNamespaces, &out.UserNamespaces
|
||||
*out = new(UserNamespaces)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -491,6 +601,37 @@ func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceAccountTokenAttributes) DeepCopyInto(out *ServiceAccountTokenAttributes) {
|
||||
*out = *in
|
||||
if in.RequireServiceAccount != nil {
|
||||
in, out := &in.RequireServiceAccount, &out.RequireServiceAccount
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.RequiredServiceAccountAnnotationKeys != nil {
|
||||
in, out := &in.RequiredServiceAccountAnnotationKeys, &out.RequiredServiceAccountAnnotationKeys
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.OptionalServiceAccountAnnotationKeys != nil {
|
||||
in, out := &in.OptionalServiceAccountAnnotationKeys, &out.OptionalServiceAccountAnnotationKeys
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenAttributes.
|
||||
func (in *ServiceAccountTokenAttributes) DeepCopy() *ServiceAccountTokenAttributes {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceAccountTokenAttributes)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
|
||||
*out = *in
|
||||
@ -506,3 +647,24 @@ func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPod
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *UserNamespaces) DeepCopyInto(out *UserNamespaces) {
|
||||
*out = *in
|
||||
if in.IDsPerPod != nil {
|
||||
in, out := &in.IDsPerPod, &out.IDsPerPod
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserNamespaces.
|
||||
func (in *UserNamespaces) DeepCopy() *UserNamespaces {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(UserNamespaces)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
30
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go
generated
vendored
30
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/server_v1.go
generated
vendored
@ -66,16 +66,13 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *podresourcesv1.Lis
|
||||
Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
|
||||
pRes.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
|
||||
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if !podutil.IsRestartableInitContainer(&container) {
|
||||
continue
|
||||
}
|
||||
|
||||
pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
|
||||
pRes.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if !podutil.IsRestartableInitContainer(&container) {
|
||||
continue
|
||||
}
|
||||
|
||||
pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.Containers {
|
||||
@ -126,16 +123,13 @@ func (p *v1PodResourcesServer) Get(ctx context.Context, req *podresourcesv1.GetP
|
||||
Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
|
||||
podResources.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
|
||||
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if !podutil.IsRestartableInitContainer(&container) {
|
||||
continue
|
||||
}
|
||||
|
||||
podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
|
||||
podResources.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if !podutil.IsRestartableInitContainer(&container) {
|
||||
continue
|
||||
}
|
||||
|
||||
podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.Containers {
|
||||
|
6
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go
generated
vendored
6
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/cadvisor_linux.go
generated
vendored
@ -39,7 +39,9 @@ import (
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"github.com/google/cadvisor/manager"
|
||||
"github.com/google/cadvisor/utils/sysfs"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
@ -93,6 +95,10 @@ func New(imageFsInfoProvider ImageFsInfoProvider, rootPath string, cgroupRoots [
|
||||
cadvisormetrics.OOMMetrics: struct{}{},
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.KubeletPSI) {
|
||||
includedMetrics[cadvisormetrics.PressureMetrics] = struct{}{}
|
||||
}
|
||||
|
||||
if usingLegacyStats || localStorageCapacityIsolation {
|
||||
includedMetrics[cadvisormetrics.DiskUsageMetrics] = struct{}{}
|
||||
}
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/doc.go
generated
vendored
@ -15,4 +15,4 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// Package cadvisor provides an interface for Kubelet interactions with cAdvisor.
|
||||
package cadvisor // import "k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
package cadvisor
|
||||
|
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/OWNERS
generated
vendored
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/OWNERS
generated
vendored
@ -6,6 +6,7 @@ approvers:
|
||||
- derekwaynecarr
|
||||
- yujuhong
|
||||
- klueska
|
||||
- ffromani
|
||||
reviewers:
|
||||
- sig-node-reviewers
|
||||
emeritus_approvers:
|
||||
|
25
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go
generated
vendored
25
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go
generated
vendored
@ -25,11 +25,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
|
||||
libcontainercgroupmanager "github.com/opencontainers/runc/libcontainer/cgroups/manager"
|
||||
cgroupsystemd "github.com/opencontainers/runc/libcontainer/cgroups/systemd"
|
||||
libcontainerconfigs "github.com/opencontainers/runc/libcontainer/configs"
|
||||
libcontainercgroups "github.com/opencontainers/cgroups"
|
||||
"github.com/opencontainers/cgroups/fscommon"
|
||||
libcontainercgroupmanager "github.com/opencontainers/cgroups/manager"
|
||||
cgroupsystemd "github.com/opencontainers/cgroups/systemd"
|
||||
"k8s.io/klog/v2"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
|
||||
@ -195,14 +194,14 @@ func (m *cgroupCommon) buildCgroupPaths(name CgroupName) map[string]string {
|
||||
}
|
||||
|
||||
// libctCgroupConfig converts CgroupConfig to libcontainer's Cgroup config.
|
||||
func (m *cgroupCommon) libctCgroupConfig(in *CgroupConfig, needResources bool) *libcontainerconfigs.Cgroup {
|
||||
config := &libcontainerconfigs.Cgroup{
|
||||
func (m *cgroupCommon) libctCgroupConfig(in *CgroupConfig, needResources bool) *libcontainercgroups.Cgroup {
|
||||
config := &libcontainercgroups.Cgroup{
|
||||
Systemd: m.useSystemd,
|
||||
}
|
||||
if needResources {
|
||||
config.Resources = m.toResources(in.ResourceParameters)
|
||||
} else {
|
||||
config.Resources = &libcontainerconfigs.Resources{}
|
||||
config.Resources = &libcontainercgroups.Resources{}
|
||||
}
|
||||
|
||||
if !config.Systemd {
|
||||
@ -279,8 +278,8 @@ var (
|
||||
availableRootControllers sets.Set[string]
|
||||
)
|
||||
|
||||
func (m *cgroupCommon) toResources(resourceConfig *ResourceConfig) *libcontainerconfigs.Resources {
|
||||
resources := &libcontainerconfigs.Resources{
|
||||
func (m *cgroupCommon) toResources(resourceConfig *ResourceConfig) *libcontainercgroups.Resources {
|
||||
resources := &libcontainercgroups.Resources{
|
||||
SkipDevices: true,
|
||||
SkipFreezeOnSet: true,
|
||||
}
|
||||
@ -324,7 +323,7 @@ func (m *cgroupCommon) toResources(resourceConfig *ResourceConfig) *libcontainer
|
||||
return resources
|
||||
}
|
||||
|
||||
func (m *cgroupCommon) maybeSetHugetlb(resourceConfig *ResourceConfig, resources *libcontainerconfigs.Resources) {
|
||||
func (m *cgroupCommon) maybeSetHugetlb(resourceConfig *ResourceConfig, resources *libcontainercgroups.Resources) {
|
||||
// Check if hugetlb is supported.
|
||||
if libcontainercgroups.IsCgroup2UnifiedMode() {
|
||||
if !getSupportedUnifiedControllers().Has("hugetlb") {
|
||||
@ -344,7 +343,7 @@ func (m *cgroupCommon) maybeSetHugetlb(resourceConfig *ResourceConfig, resources
|
||||
klog.InfoS("Invalid pageSize", "err", err)
|
||||
continue
|
||||
}
|
||||
resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainerconfigs.HugepageLimit{
|
||||
resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainercgroups.HugepageLimit{
|
||||
Pagesize: sizeString,
|
||||
Limit: uint64(limit),
|
||||
})
|
||||
@ -355,7 +354,7 @@ func (m *cgroupCommon) maybeSetHugetlb(resourceConfig *ResourceConfig, resources
|
||||
if pageSizes.Has(pageSize) {
|
||||
continue
|
||||
}
|
||||
resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainerconfigs.HugepageLimit{
|
||||
resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainercgroups.HugepageLimit{
|
||||
Pagesize: pageSize,
|
||||
Limit: uint64(0),
|
||||
})
|
||||
|
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_v1_manager_linux.go
generated
vendored
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_v1_manager_linux.go
generated
vendored
@ -22,8 +22,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
|
||||
libcontainercgroups "github.com/opencontainers/cgroups"
|
||||
"github.com/opencontainers/cgroups/fscommon"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
18
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_v2_manager_linux.go
generated
vendored
18
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_v2_manager_linux.go
generated
vendored
@ -24,13 +24,17 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
|
||||
"github.com/opencontainers/cgroups/fscommon"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
|
||||
)
|
||||
|
||||
const cgroupv2MemLimitFile string = "memory.max"
|
||||
const (
|
||||
cgroupv2MemLimitFile = "memory.max"
|
||||
cgroupv2CpuMaxFile = "cpu.max"
|
||||
cgroupv2CpuWeightFile = "cpu.weight"
|
||||
)
|
||||
|
||||
// cgroupV2impl implements the CgroupManager interface
|
||||
// for cgroup v2.
|
||||
@ -100,14 +104,14 @@ func (c *cgroupV2impl) GetCgroupConfig(name CgroupName, resource v1.ResourceName
|
||||
|
||||
func (c *cgroupV2impl) getCgroupCPUConfig(cgroupPath string) (*ResourceConfig, error) {
|
||||
var cpuLimitStr, cpuPeriodStr string
|
||||
cpuLimitAndPeriod, err := fscommon.GetCgroupParamString(cgroupPath, "cpu.max")
|
||||
cpuLimitAndPeriod, err := fscommon.GetCgroupParamString(cgroupPath, cgroupv2CpuMaxFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read cpu.max file for cgroup %v: %w", cgroupPath, err)
|
||||
return nil, fmt.Errorf("failed to read %s file for cgroup %v: %w", cgroupv2CpuMaxFile, cgroupPath, err)
|
||||
}
|
||||
numItems, errScan := fmt.Sscanf(cpuLimitAndPeriod, "%s %s", &cpuLimitStr, &cpuPeriodStr)
|
||||
if errScan != nil || numItems != 2 {
|
||||
return nil, fmt.Errorf("failed to correctly parse content of cpu.max file ('%s') for cgroup %v: %w",
|
||||
cpuLimitAndPeriod, cgroupPath, errScan)
|
||||
return nil, fmt.Errorf("failed to correctly parse content of %s file ('%s') for cgroup %v: %w",
|
||||
cgroupv2CpuMaxFile, cpuLimitAndPeriod, cgroupPath, errScan)
|
||||
}
|
||||
cpuLimit := int64(-1)
|
||||
if cpuLimitStr != Cgroup2MaxCpuLimit {
|
||||
@ -120,7 +124,7 @@ func (c *cgroupV2impl) getCgroupCPUConfig(cgroupPath string) (*ResourceConfig, e
|
||||
if errPeriod != nil {
|
||||
return nil, fmt.Errorf("failed to convert CPU period as integer for cgroup %v: %w", cgroupPath, errPeriod)
|
||||
}
|
||||
cpuWeight, errWeight := fscommon.GetCgroupParamUint(cgroupPath, "cpu.weight")
|
||||
cpuWeight, errWeight := fscommon.GetCgroupParamUint(cgroupPath, cgroupv2CpuWeightFile)
|
||||
if errWeight != nil {
|
||||
return nil, fmt.Errorf("failed to read CPU weight for cgroup %v: %w", cgroupPath, errWeight)
|
||||
}
|
||||
|
62
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go
generated
vendored
62
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager.go
generated
vendored
@ -31,6 +31,7 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
internalapi "k8s.io/cri-api/pkg/apis"
|
||||
"k8s.io/klog/v2"
|
||||
podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
|
||||
@ -154,6 +155,13 @@ type ContainerManager interface {
|
||||
// Updates returns a channel that receives an Update when the device changed its status.
|
||||
Updates() <-chan resourceupdates.Update
|
||||
|
||||
// PodHasExclusiveCPUs returns true if the provided pod has containers with exclusive CPUs,
|
||||
// This means that at least one sidecar container or one app container has exclusive CPUs allocated.
|
||||
PodHasExclusiveCPUs(pod *v1.Pod) bool
|
||||
|
||||
// ContainerHasExclusiveCPUs returns true if the provided container in the pod has exclusive cpu
|
||||
ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool
|
||||
|
||||
// Implements the PodResources Provider API
|
||||
podresources.CPUsProvider
|
||||
podresources.DevicesProvider
|
||||
@ -161,6 +169,10 @@ type ContainerManager interface {
|
||||
podresources.DynamicResourcesProvider
|
||||
}
|
||||
|
||||
type cpuAllocationReader interface {
|
||||
GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet
|
||||
}
|
||||
|
||||
type NodeConfig struct {
|
||||
NodeName types.NodeName
|
||||
RuntimeCgroupsName string
|
||||
@ -174,19 +186,19 @@ type NodeConfig struct {
|
||||
KubeletRootDir string
|
||||
ProtectKernelDefaults bool
|
||||
NodeAllocatableConfig
|
||||
QOSReserved map[v1.ResourceName]int64
|
||||
CPUManagerPolicy string
|
||||
CPUManagerPolicyOptions map[string]string
|
||||
TopologyManagerScope string
|
||||
CPUManagerReconcilePeriod time.Duration
|
||||
ExperimentalMemoryManagerPolicy string
|
||||
ExperimentalMemoryManagerReservedMemory []kubeletconfig.MemoryReservation
|
||||
PodPidsLimit int64
|
||||
EnforceCPULimits bool
|
||||
CPUCFSQuotaPeriod time.Duration
|
||||
TopologyManagerPolicy string
|
||||
TopologyManagerPolicyOptions map[string]string
|
||||
CgroupVersion int
|
||||
QOSReserved map[v1.ResourceName]int64
|
||||
CPUManagerPolicy string
|
||||
CPUManagerPolicyOptions map[string]string
|
||||
TopologyManagerScope string
|
||||
CPUManagerReconcilePeriod time.Duration
|
||||
MemoryManagerPolicy string
|
||||
MemoryManagerReservedMemory []kubeletconfig.MemoryReservation
|
||||
PodPidsLimit int64
|
||||
EnforceCPULimits bool
|
||||
CPUCFSQuotaPeriod time.Duration
|
||||
TopologyManagerPolicy string
|
||||
TopologyManagerPolicyOptions map[string]string
|
||||
CgroupVersion int
|
||||
}
|
||||
|
||||
type NodeAllocatableConfig struct {
|
||||
@ -212,6 +224,30 @@ func int64Slice(in []int) []int64 {
|
||||
return out
|
||||
}
|
||||
|
||||
func podHasExclusiveCPUs(cr cpuAllocationReader, pod *v1.Pod) bool {
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if containerHasExclusiveCPUs(cr, pod, &container) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if containerHasExclusiveCPUs(cr, pod, &container) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
klog.V(4).InfoS("Pod contains no container with pinned cpus", "podName", pod.Name)
|
||||
return false
|
||||
}
|
||||
|
||||
func containerHasExclusiveCPUs(cr cpuAllocationReader, pod *v1.Pod, container *v1.Container) bool {
|
||||
exclusiveCPUs := cr.GetExclusiveCPUs(string(pod.UID), container.Name)
|
||||
if !exclusiveCPUs.IsEmpty() {
|
||||
klog.V(4).InfoS("Container has pinned cpus", "podName", pod.Name, "containerName", container.Name)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parsePercentage parses the percentage string to numeric value.
|
||||
func parsePercentage(v string) (int64, error) {
|
||||
if !strings.HasSuffix(v, "%") {
|
||||
|
24
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go
generated
vendored
24
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_linux.go
generated
vendored
@ -27,9 +27,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/manager"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/cgroups"
|
||||
"github.com/opencontainers/cgroups/manager"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/mount-utils"
|
||||
utilpath "k8s.io/utils/path"
|
||||
@ -336,10 +335,10 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
|
||||
cm.topologyManager.AddHintProvider(cm.cpuManager)
|
||||
|
||||
cm.memoryManager, err = memorymanager.NewManager(
|
||||
nodeConfig.ExperimentalMemoryManagerPolicy,
|
||||
nodeConfig.MemoryManagerPolicy,
|
||||
machineInfo,
|
||||
cm.GetNodeAllocatableReservation(),
|
||||
nodeConfig.ExperimentalMemoryManagerReservedMemory,
|
||||
nodeConfig.MemoryManagerReservedMemory,
|
||||
nodeConfig.KubeletRootDir,
|
||||
cm.topologyManager,
|
||||
)
|
||||
@ -365,7 +364,8 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
|
||||
enforceCPULimits: cm.EnforceCPULimits,
|
||||
// cpuCFSQuotaPeriod is in microseconds. NodeConfig.CPUCFSQuotaPeriod is time.Duration (measured in nano seconds).
|
||||
// Convert (cm.CPUCFSQuotaPeriod) [nanoseconds] / time.Microsecond (1000) to get cpuCFSQuotaPeriod in microseconds.
|
||||
cpuCFSQuotaPeriod: uint64(cm.CPUCFSQuotaPeriod / time.Microsecond),
|
||||
cpuCFSQuotaPeriod: uint64(cm.CPUCFSQuotaPeriod / time.Microsecond),
|
||||
podContainerManager: cm,
|
||||
}
|
||||
}
|
||||
return &podContainerManagerNoop{
|
||||
@ -373,16 +373,24 @@ func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) PodHasExclusiveCPUs(pod *v1.Pod) bool {
|
||||
return podHasExclusiveCPUs(cm.cpuManager, pod)
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
|
||||
return containerHasExclusiveCPUs(cm.cpuManager, pod, container)
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle {
|
||||
return &internalContainerLifecycleImpl{cm.cpuManager, cm.memoryManager, cm.topologyManager}
|
||||
}
|
||||
|
||||
// Create a cgroup container manager.
|
||||
func createManager(containerName string) (cgroups.Manager, error) {
|
||||
cg := &configs.Cgroup{
|
||||
cg := &cgroups.Cgroup{
|
||||
Parent: "/",
|
||||
Name: containerName,
|
||||
Resources: &configs.Resources{
|
||||
Resources: &cgroups.Resources{
|
||||
SkipDevices: true,
|
||||
},
|
||||
Systemd: false,
|
||||
|
8
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go
generated
vendored
8
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_stub.go
generated
vendored
@ -195,6 +195,14 @@ func (cm *containerManagerStub) Updates() <-chan resourceupdates.Update {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *containerManagerStub) PodHasExclusiveCPUs(pod *v1.Pod) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (cm *containerManagerStub) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func NewStubContainerManager() ContainerManager {
|
||||
return &containerManagerStub{shouldResetExtendedResourceCapacity: false}
|
||||
}
|
||||
|
12
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go
generated
vendored
12
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/container_manager_windows.go
generated
vendored
@ -168,10 +168,10 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
|
||||
|
||||
klog.InfoS("Creating memory manager")
|
||||
cm.memoryManager, err = memorymanager.NewManager(
|
||||
nodeConfig.ExperimentalMemoryManagerPolicy,
|
||||
nodeConfig.MemoryManagerPolicy,
|
||||
machineInfo,
|
||||
cm.GetNodeAllocatableReservation(),
|
||||
nodeConfig.ExperimentalMemoryManagerReservedMemory,
|
||||
nodeConfig.MemoryManagerReservedMemory,
|
||||
nodeConfig.KubeletRootDir,
|
||||
cm.topologyManager,
|
||||
)
|
||||
@ -369,3 +369,11 @@ func (cm *containerManagerImpl) UnprepareDynamicResources(ctx context.Context, p
|
||||
func (cm *containerManagerImpl) PodMightNeedToUnprepareResources(UID types.UID) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) PodHasExclusiveCPUs(pod *v1.Pod) bool {
|
||||
return podHasExclusiveCPUs(cm.cpuManager, pod)
|
||||
}
|
||||
|
||||
func (cm *containerManagerImpl) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
|
||||
return containerHasExclusiveCPUs(cm.cpuManager, pod, container)
|
||||
}
|
||||
|
7
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/containermap/container_map.go
generated
vendored
7
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/containermap/container_map.go
generated
vendored
@ -18,6 +18,7 @@ package containermap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
)
|
||||
|
||||
// cmItem (ContainerMap ITEM) is a pair podUID, containerName
|
||||
@ -36,11 +37,7 @@ func NewContainerMap() ContainerMap {
|
||||
|
||||
// Clone creates a deep copy of the ContainerMap
|
||||
func (cm ContainerMap) Clone() ContainerMap {
|
||||
ret := make(ContainerMap, len(cm))
|
||||
for key, val := range cm {
|
||||
ret[key] = val
|
||||
}
|
||||
return ret
|
||||
return maps.Clone(cm)
|
||||
}
|
||||
|
||||
// Add adds a mapping of (containerID)->(podUID, containerName) to the ContainerMap
|
||||
|
7
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go
generated
vendored
7
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_assignment.go
generated
vendored
@ -18,6 +18,7 @@ package cpumanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
@ -39,11 +40,7 @@ const (
|
||||
type mapIntInt map[int]int
|
||||
|
||||
func (m mapIntInt) Clone() mapIntInt {
|
||||
cp := make(mapIntInt, len(m))
|
||||
for k, v := range m {
|
||||
cp[k] = v
|
||||
}
|
||||
return cp
|
||||
return maps.Clone(m)
|
||||
}
|
||||
|
||||
func (m mapIntInt) Keys() []int {
|
||||
|
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go
generated
vendored
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/cpu_manager.go
generated
vendored
@ -239,6 +239,8 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("CPU manager started", "policy", m.policy.Name())
|
||||
|
||||
m.allocatableCPUs = m.policy.GetAllocatableCPUs(m.state)
|
||||
|
||||
if m.policy.Name() == string(PolicyNone) {
|
||||
@ -465,7 +467,7 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
|
||||
cset := m.state.GetCPUSetOrDefault(string(pod.UID), container.Name)
|
||||
if cset.IsEmpty() {
|
||||
// NOTE: This should not happen outside of tests.
|
||||
klog.V(2).InfoS("ReconcileState: skipping container; assigned cpuset is empty", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
klog.V(2).InfoS("ReconcileState: skipping container; empty cpuset assigned", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
|
||||
continue
|
||||
}
|
||||
|
8
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_options.go
generated
vendored
8
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_options.go
generated
vendored
@ -39,16 +39,17 @@ const (
|
||||
|
||||
var (
|
||||
alphaOptions = sets.New[string](
|
||||
DistributeCPUsAcrossNUMAOption,
|
||||
AlignBySocketOption,
|
||||
DistributeCPUsAcrossCoresOption,
|
||||
StrictCPUReservationOption,
|
||||
PreferAlignByUnCoreCacheOption,
|
||||
)
|
||||
betaOptions = sets.New[string](
|
||||
StrictCPUReservationOption,
|
||||
DistributeCPUsAcrossNUMAOption,
|
||||
)
|
||||
stableOptions = sets.New[string](
|
||||
FullPCPUsOnlyOption,
|
||||
)
|
||||
stableOptions = sets.New[string]()
|
||||
)
|
||||
|
||||
// CheckPolicyOptionAvailable verifies if the given option can be used depending on the Feature Gate Settings.
|
||||
@ -66,6 +67,7 @@ func CheckPolicyOptionAvailable(option string) error {
|
||||
return fmt.Errorf("CPU Manager Policy Beta-level Options not enabled, but option %q provided", option)
|
||||
}
|
||||
|
||||
// if the option is stable, we need no CPUManagerPolicy*Options feature gate check
|
||||
return nil
|
||||
}
|
||||
|
||||
|
116
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go
generated
vendored
116
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go
generated
vendored
@ -18,6 +18,7 @@ package cpumanager
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
@ -325,13 +326,16 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
|
||||
defer func() {
|
||||
if rerr != nil {
|
||||
metrics.CPUManagerPinningErrorsTotal.Inc()
|
||||
if p.options.FullPhysicalCPUsOnly {
|
||||
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc()
|
||||
}
|
||||
return
|
||||
}
|
||||
if !p.options.FullPhysicalCPUsOnly {
|
||||
// TODO: move in updateMetricsOnAllocate
|
||||
if p.options.FullPhysicalCPUsOnly {
|
||||
// increment only if we know we allocate aligned resources
|
||||
return
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc()
|
||||
}
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Inc()
|
||||
}()
|
||||
|
||||
if p.options.FullPhysicalCPUsOnly {
|
||||
@ -367,8 +371,8 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
|
||||
}
|
||||
}
|
||||
}
|
||||
if cpuset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
|
||||
p.updateCPUsToReuse(pod, container, cpuset)
|
||||
if cset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
|
||||
p.updateCPUsToReuse(pod, container, cset)
|
||||
klog.InfoS("Static policy: container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
return nil
|
||||
}
|
||||
@ -378,16 +382,17 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
|
||||
klog.InfoS("Topology Affinity", "pod", klog.KObj(pod), "containerName", container.Name, "affinity", hint)
|
||||
|
||||
// Allocate CPUs according to the NUMA affinity contained in the hint.
|
||||
cpuset, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity, p.cpusToReuse[string(pod.UID)])
|
||||
cpuAllocation, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity, p.cpusToReuse[string(pod.UID)])
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Unable to allocate CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "numCPUs", numCPUs)
|
||||
return err
|
||||
}
|
||||
|
||||
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
|
||||
p.updateCPUsToReuse(pod, container, cpuset)
|
||||
p.updateMetricsOnAllocate(cpuset)
|
||||
s.SetCPUSet(string(pod.UID), container.Name, cpuAllocation.CPUs)
|
||||
p.updateCPUsToReuse(pod, container, cpuAllocation.CPUs)
|
||||
p.updateMetricsOnAllocate(s, cpuAllocation)
|
||||
|
||||
klog.V(4).InfoS("Allocated exclusive CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "cpuset", cpuAllocation.CPUs.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -412,18 +417,19 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
|
||||
// Mutate the shared pool, adding released cpus.
|
||||
toRelease = toRelease.Difference(cpusInUse)
|
||||
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
|
||||
p.updateMetricsOnRelease(toRelease)
|
||||
p.updateMetricsOnRelease(s, toRelease)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (cpuset.CPUSet, error) {
|
||||
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (topology.Allocation, error) {
|
||||
klog.InfoS("AllocateCPUs", "numCPUs", numCPUs, "socket", numaAffinity)
|
||||
|
||||
allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs)
|
||||
|
||||
// If there are aligned CPUs in numaAffinity, attempt to take those first.
|
||||
result := cpuset.New()
|
||||
result := topology.EmptyAllocation()
|
||||
if numaAffinity != nil {
|
||||
alignedCPUs := p.getAlignedCPUs(numaAffinity, allocatableCPUs)
|
||||
|
||||
@ -432,30 +438,33 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
|
||||
numAlignedToAlloc = numCPUs
|
||||
}
|
||||
|
||||
alignedCPUs, err := p.takeByTopology(alignedCPUs, numAlignedToAlloc)
|
||||
allocatedCPUs, err := p.takeByTopology(alignedCPUs, numAlignedToAlloc)
|
||||
if err != nil {
|
||||
return cpuset.New(), err
|
||||
return topology.EmptyAllocation(), err
|
||||
}
|
||||
|
||||
result = result.Union(alignedCPUs)
|
||||
result.CPUs = result.CPUs.Union(allocatedCPUs)
|
||||
}
|
||||
|
||||
// Get any remaining CPUs from what's leftover after attempting to grab aligned ones.
|
||||
remainingCPUs, err := p.takeByTopology(allocatableCPUs.Difference(result), numCPUs-result.Size())
|
||||
remainingCPUs, err := p.takeByTopology(allocatableCPUs.Difference(result.CPUs), numCPUs-result.CPUs.Size())
|
||||
if err != nil {
|
||||
return cpuset.New(), err
|
||||
return topology.EmptyAllocation(), err
|
||||
}
|
||||
result = result.Union(remainingCPUs)
|
||||
result.CPUs = result.CPUs.Union(remainingCPUs)
|
||||
result.Aligned = p.topology.CheckAlignment(result.CPUs)
|
||||
|
||||
// Remove allocated CPUs from the shared CPUSet.
|
||||
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result))
|
||||
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result.CPUs))
|
||||
|
||||
klog.InfoS("AllocateCPUs", "result", result)
|
||||
klog.InfoS("AllocateCPUs", "result", result.String())
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
|
||||
if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
|
||||
qos := v1qos.GetPodQOS(pod)
|
||||
if qos != v1.PodQOSGuaranteed {
|
||||
klog.V(5).InfoS("Exclusive CPU allocation skipped, pod QoS is not guaranteed", "pod", klog.KObj(pod), "containerName", container.Name, "qos", qos)
|
||||
return 0
|
||||
}
|
||||
cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
|
||||
@ -464,11 +473,19 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
|
||||
// We should return this value because this is what kubelet agreed to allocate for the container
|
||||
// and the value configured with runtime.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
|
||||
if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok {
|
||||
containerStatuses := pod.Status.ContainerStatuses
|
||||
if podutil.IsRestartableInitContainer(container) {
|
||||
if len(pod.Status.InitContainerStatuses) != 0 {
|
||||
containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...)
|
||||
}
|
||||
}
|
||||
if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||
cpuQuantity = cs.AllocatedResources[v1.ResourceCPU]
|
||||
}
|
||||
}
|
||||
if cpuQuantity.Value()*1000 != cpuQuantity.MilliValue() {
|
||||
cpuValue := cpuQuantity.Value()
|
||||
if cpuValue*1000 != cpuQuantity.MilliValue() {
|
||||
klog.V(5).InfoS("Exclusive CPU allocation skipped, pod requested non-integral CPUs", "pod", klog.KObj(pod), "containerName", container.Name, "cpu", cpuValue)
|
||||
return 0
|
||||
}
|
||||
// Safe downcast to do for all systems with < 2.1 billion CPUs.
|
||||
@ -740,27 +757,60 @@ func (p *staticPolicy) getAlignedCPUs(numaAffinity bitmask.BitMask, allocatableC
|
||||
|
||||
func (p *staticPolicy) initializeMetrics(s state.State) {
|
||||
metrics.CPUManagerSharedPoolSizeMilliCores.Set(float64(p.GetAvailableCPUs(s).Size() * 1000))
|
||||
metrics.CPUManagerExclusiveCPUsAllocationCount.Set(float64(countExclusiveCPUs(s)))
|
||||
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Add(0) // ensure the value exists
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedPhysicalCPU).Add(0) // ensure the value exists
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedUncoreCache).Add(0) // ensure the value exists
|
||||
totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s)
|
||||
metrics.CPUManagerExclusiveCPUsAllocationCount.Set(float64(totalAssignedCPUs.Size()))
|
||||
updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs)
|
||||
}
|
||||
|
||||
func (p *staticPolicy) updateMetricsOnAllocate(cset cpuset.CPUSet) {
|
||||
ncpus := cset.Size()
|
||||
func (p *staticPolicy) updateMetricsOnAllocate(s state.State, cpuAlloc topology.Allocation) {
|
||||
ncpus := cpuAlloc.CPUs.Size()
|
||||
metrics.CPUManagerExclusiveCPUsAllocationCount.Add(float64(ncpus))
|
||||
metrics.CPUManagerSharedPoolSizeMilliCores.Add(float64(-ncpus * 1000))
|
||||
if cpuAlloc.Aligned.UncoreCache {
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedUncoreCache).Inc()
|
||||
}
|
||||
totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s)
|
||||
updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs)
|
||||
}
|
||||
|
||||
func (p *staticPolicy) updateMetricsOnRelease(cset cpuset.CPUSet) {
|
||||
func (p *staticPolicy) updateMetricsOnRelease(s state.State, cset cpuset.CPUSet) {
|
||||
ncpus := cset.Size()
|
||||
metrics.CPUManagerExclusiveCPUsAllocationCount.Add(float64(-ncpus))
|
||||
metrics.CPUManagerSharedPoolSizeMilliCores.Add(float64(ncpus * 1000))
|
||||
totalAssignedCPUs := getTotalAssignedExclusiveCPUs(s)
|
||||
updateAllocationPerNUMAMetric(p.topology, totalAssignedCPUs.Difference(cset))
|
||||
}
|
||||
|
||||
func countExclusiveCPUs(s state.State) int {
|
||||
exclusiveCPUs := 0
|
||||
for _, cpuAssign := range s.GetCPUAssignments() {
|
||||
for _, cset := range cpuAssign {
|
||||
exclusiveCPUs += cset.Size()
|
||||
func getTotalAssignedExclusiveCPUs(s state.State) cpuset.CPUSet {
|
||||
totalAssignedCPUs := cpuset.New()
|
||||
for _, assignment := range s.GetCPUAssignments() {
|
||||
for _, cset := range assignment {
|
||||
totalAssignedCPUs = totalAssignedCPUs.Union(cset)
|
||||
}
|
||||
|
||||
}
|
||||
return totalAssignedCPUs
|
||||
}
|
||||
|
||||
func updateAllocationPerNUMAMetric(topo *topology.CPUTopology, allocatedCPUs cpuset.CPUSet) {
|
||||
numaCount := make(map[int]int)
|
||||
|
||||
// Count CPUs allocated per NUMA node
|
||||
for _, cpuID := range allocatedCPUs.UnsortedList() {
|
||||
numaNode, err := topo.CPUNUMANodeID(cpuID)
|
||||
if err != nil {
|
||||
//NOTE: We are logging the error but it is highly unlikely to happen as the CPUset
|
||||
// is already computed, evaluated and there is no room for user tampering.
|
||||
klog.ErrorS(err, "Unable to determine NUMA node", "cpuID", cpuID)
|
||||
}
|
||||
numaCount[numaNode]++
|
||||
}
|
||||
|
||||
// Update metric
|
||||
for numaNode, count := range numaCount {
|
||||
metrics.CPUManagerAllocationPerNUMA.WithLabelValues(strconv.Itoa(numaNode)).Set(float64(count))
|
||||
}
|
||||
return exclusiveCPUs
|
||||
}
|
||||
|
10
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go
generated
vendored
10
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state/state_checkpoint.go
generated
vendored
@ -201,7 +201,7 @@ func (sc *stateCheckpoint) SetCPUSet(podUID string, containerName string, cset c
|
||||
sc.cache.SetCPUSet(podUID, containerName, cset)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -212,7 +212,7 @@ func (sc *stateCheckpoint) SetDefaultCPUSet(cset cpuset.CPUSet) {
|
||||
sc.cache.SetDefaultCPUSet(cset)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
@ -223,7 +223,7 @@ func (sc *stateCheckpoint) SetCPUAssignments(a ContainerCPUAssignments) {
|
||||
sc.cache.SetCPUAssignments(a)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
@ -234,7 +234,7 @@ func (sc *stateCheckpoint) Delete(podUID string, containerName string) {
|
||||
sc.cache.Delete(podUID, containerName)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -245,6 +245,6 @@ func (sc *stateCheckpoint) ClearState() {
|
||||
sc.cache.ClearState()
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint")
|
||||
}
|
||||
}
|
||||
|
78
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/alignment.go
generated
vendored
Normal file
78
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/alignment.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package topology
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/utils/cpuset"
|
||||
)
|
||||
|
||||
// Alignment is metadata about a cpuset allocation
|
||||
type Alignment struct {
|
||||
// UncoreCache is true if all the CPUs are uncore-cache aligned,
|
||||
// IOW if they all share the same Uncore cache block.
|
||||
// If the allocated CPU count is greater than a Uncore Group size,
|
||||
// CPUs can't be uncore-aligned; otherwise, they are.
|
||||
// This flag tracks alignment, not interference or lack thereof.
|
||||
UncoreCache bool
|
||||
}
|
||||
|
||||
func (ca Alignment) String() string {
|
||||
return fmt.Sprintf("aligned=<uncore:%v>", ca.UncoreCache)
|
||||
}
|
||||
|
||||
// Allocation represents a CPU set plus alignment metadata
|
||||
type Allocation struct {
|
||||
CPUs cpuset.CPUSet
|
||||
Aligned Alignment
|
||||
}
|
||||
|
||||
func (ca Allocation) String() string {
|
||||
return ca.CPUs.String() + " " + ca.Aligned.String()
|
||||
}
|
||||
|
||||
// EmptyAllocation returns a new zero-valued CPU allocation. Please note that
|
||||
// a empty cpuset is aligned according to every possible way we can consider
|
||||
func EmptyAllocation() Allocation {
|
||||
return Allocation{
|
||||
CPUs: cpuset.New(),
|
||||
Aligned: Alignment{
|
||||
UncoreCache: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func isAlignedAtUncoreCache(topo *CPUTopology, cpuList ...int) bool {
|
||||
if len(cpuList) <= 1 {
|
||||
return true
|
||||
}
|
||||
reference, ok := topo.CPUDetails[cpuList[0]]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
for _, cpu := range cpuList[1:] {
|
||||
info, ok := topo.CPUDetails[cpu]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if info.UncoreCacheID != reference.UncoreCacheID {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/doc.go
generated
vendored
@ -15,4 +15,4 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// Package topology contains helpers for the CPU manager.
|
||||
package topology // import "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
|
||||
package topology
|
||||
|
9
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go
generated
vendored
9
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go
generated
vendored
@ -101,6 +101,15 @@ func (topo *CPUTopology) CPUNUMANodeID(cpu int) (int, error) {
|
||||
return info.NUMANodeID, nil
|
||||
}
|
||||
|
||||
// CheckAlignment returns alignment information for the given cpuset in
|
||||
// the context of the current CPU topology
|
||||
func (topo *CPUTopology) CheckAlignment(cpus cpuset.CPUSet) Alignment {
|
||||
cpuList := cpus.UnsortedList()
|
||||
return Alignment{
|
||||
UncoreCache: isAlignedAtUncoreCache(topo, cpuList...),
|
||||
}
|
||||
}
|
||||
|
||||
// CPUInfo contains the NUMA, socket, UncoreCache and core IDs associated with a CPU.
|
||||
type CPUInfo struct {
|
||||
NUMANodeID int
|
||||
|
44
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go
generated
vendored
44
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/manager.go
generated
vendored
@ -202,15 +202,12 @@ func (m *ManagerImpl) CleanupPluginDirectory(dir string) error {
|
||||
if filePath == m.checkpointFile() {
|
||||
continue
|
||||
}
|
||||
// TODO: Until the bug - https://github.com/golang/go/issues/33357 is fixed, os.stat wouldn't return the
|
||||
// right mode(socket) on windows. Hence deleting the file, without checking whether
|
||||
// its a socket, on windows.
|
||||
stat, err := os.Lstat(filePath)
|
||||
stat, err := os.Stat(filePath)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to stat file", "path", filePath)
|
||||
continue
|
||||
}
|
||||
if stat.IsDir() {
|
||||
if stat.IsDir() || stat.Mode()&os.ModeSocket == 0 {
|
||||
continue
|
||||
}
|
||||
err = os.RemoveAll(filePath)
|
||||
@ -351,7 +348,7 @@ func (m *ManagerImpl) Start(activePods ActivePodsFunc, sourcesReady config.Sourc
|
||||
// Loads in allocatedDevices information from disk.
|
||||
err := m.readCheckpoint()
|
||||
if err != nil {
|
||||
klog.InfoS("Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date", "err", err)
|
||||
klog.ErrorS(err, "Continue after failing to read checkpoint file. Device allocation info may NOT be up-to-date")
|
||||
}
|
||||
|
||||
return m.server.Start()
|
||||
@ -453,7 +450,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
|
||||
// should always be consistent. Otherwise, we run with the risk
|
||||
// of failing to garbage collect non-existing resources or devices.
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Unexpected: healthyDevices and endpoints are out of sync")
|
||||
klog.InfoS("Unexpected: healthyDevices and endpoints are out of sync")
|
||||
}
|
||||
delete(m.endpoints, resourceName)
|
||||
delete(m.healthyDevices, resourceName)
|
||||
@ -468,7 +465,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
|
||||
eI, ok := m.endpoints[resourceName]
|
||||
if (ok && eI.e.stopGracePeriodExpired()) || !ok {
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Unexpected: unhealthyDevices and endpoints are out of sync")
|
||||
klog.InfoS("Unexpected: unhealthyDevices and endpoints became out of sync")
|
||||
}
|
||||
delete(m.endpoints, resourceName)
|
||||
delete(m.unhealthyDevices, resourceName)
|
||||
@ -484,7 +481,7 @@ func (m *ManagerImpl) GetCapacity() (v1.ResourceList, v1.ResourceList, []string)
|
||||
m.mutex.Unlock()
|
||||
if needsUpdateCheckpoint {
|
||||
if err := m.writeCheckpoint(); err != nil {
|
||||
klog.ErrorS(err, "Error on writing checkpoint")
|
||||
klog.ErrorS(err, "Failed to write checkpoint file")
|
||||
}
|
||||
}
|
||||
return capacity, allocatable, deletedResources.UnsortedList()
|
||||
@ -503,9 +500,10 @@ func (m *ManagerImpl) writeCheckpoint() error {
|
||||
err := m.checkpointManager.CreateCheckpoint(kubeletDeviceManagerCheckpoint, data)
|
||||
if err != nil {
|
||||
err2 := fmt.Errorf("failed to write checkpoint file %q: %v", kubeletDeviceManagerCheckpoint, err)
|
||||
klog.InfoS("Failed to write checkpoint file", "err", err)
|
||||
klog.ErrorS(err, "Failed to write checkpoint file")
|
||||
return err2
|
||||
}
|
||||
klog.V(4).InfoS("Checkpoint file written", "checkpoint", kubeletDeviceManagerCheckpoint)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -516,7 +514,7 @@ func (m *ManagerImpl) readCheckpoint() error {
|
||||
if err != nil {
|
||||
if err == errors.ErrCheckpointNotFound {
|
||||
// no point in trying anything else
|
||||
klog.InfoS("Failed to read data from checkpoint", "checkpoint", kubeletDeviceManagerCheckpoint, "err", err)
|
||||
klog.ErrorS(err, "Failed to read data from checkpoint", "checkpoint", kubeletDeviceManagerCheckpoint)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
@ -534,6 +532,8 @@ func (m *ManagerImpl) readCheckpoint() error {
|
||||
m.unhealthyDevices[resource] = sets.New[string]()
|
||||
m.endpoints[resource] = endpointInfo{e: newStoppedEndpointImpl(resource), opts: nil}
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("Read data from checkpoint file", "checkpoint", kubeletDeviceManagerCheckpoint)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -596,7 +596,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
|
||||
// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be
|
||||
// because it already has access to all the required devices, so we got nothing to do and we can bail out.
|
||||
if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
|
||||
klog.V(3).InfoS("container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
|
||||
klog.V(3).InfoS("Container detected running, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@ -627,7 +627,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
|
||||
// We handled the known error paths in scenario 3 (node reboot), so from now on we can fall back in a common path.
|
||||
// We cover container restart on kubelet steady state with the same flow.
|
||||
if needed == 0 {
|
||||
klog.V(3).InfoS("no devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
|
||||
klog.V(3).InfoS("No devices needed, nothing to do", "deviceNumber", needed, "resourceName", resource, "podUID", podUID, "containerName", contName)
|
||||
// No change, no work.
|
||||
return nil, nil
|
||||
}
|
||||
@ -836,7 +836,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
|
||||
for k, v := range container.Resources.Limits {
|
||||
resource := string(k)
|
||||
needed := int(v.Value())
|
||||
klog.V(3).InfoS("Looking for needed resources", "needed", needed, "resourceName", resource)
|
||||
klog.V(3).InfoS("Looking for needed resources", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "needed", needed)
|
||||
if !m.isDevicePluginResource(resource) {
|
||||
continue
|
||||
}
|
||||
@ -882,7 +882,7 @@ func (m *ManagerImpl) allocateContainerResources(pod *v1.Pod, container *v1.Cont
|
||||
devs := allocDevices.UnsortedList()
|
||||
// TODO: refactor this part of code to just append a ContainerAllocationRequest
|
||||
// in a passed in AllocateRequest pointer, and issues a single Allocate call per pod.
|
||||
klog.V(3).InfoS("Making allocation request for device plugin", "devices", devs, "resourceName", resource)
|
||||
klog.V(4).InfoS("Making allocation request for device plugin", "devices", devs, "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
resp, err := eI.e.allocate(devs)
|
||||
metrics.DevicePluginAllocationDuration.WithLabelValues(resource).Observe(metrics.SinceInSeconds(startRPCTime))
|
||||
if err != nil {
|
||||
@ -952,7 +952,7 @@ func (m *ManagerImpl) GetDeviceRunContainerOptions(pod *v1.Pod, container *v1.Co
|
||||
}
|
||||
|
||||
if !m.checkPodActive(pod) {
|
||||
klog.ErrorS(nil, "pod deleted from activePods, skip to reAllocate", "podUID", podUID)
|
||||
klog.V(5).InfoS("Pod deleted from activePods, skip to reAllocate", "pod", klog.KObj(pod), "podUID", podUID, "containerName", container.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
@ -984,7 +984,7 @@ func (m *ManagerImpl) callPreStartContainerIfNeeded(podUID, contName, resource s
|
||||
|
||||
if eI.opts == nil || !eI.opts.PreStartRequired {
|
||||
m.mutex.Unlock()
|
||||
klog.V(4).InfoS("Plugin options indicate to skip PreStartContainer for resource", "resourceName", resource)
|
||||
klog.V(5).InfoS("Plugin options indicate to skip PreStartContainer for resource", "podUID", podUID, "resourceName", resource, "containerName", contName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1014,12 +1014,12 @@ func (m *ManagerImpl) callGetPreferredAllocationIfAvailable(podUID, contName, re
|
||||
}
|
||||
|
||||
if eI.opts == nil || !eI.opts.GetPreferredAllocationAvailable {
|
||||
klog.V(4).InfoS("Plugin options indicate to skip GetPreferredAllocation for resource", "resourceName", resource)
|
||||
klog.V(5).InfoS("Plugin options indicate to skip GetPreferredAllocation for resource", "resourceName", resource, "podUID", podUID, "containerName", contName)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
m.mutex.Unlock()
|
||||
klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "containerName", contName, "podUID", podUID)
|
||||
klog.V(4).InfoS("Issuing a GetPreferredAllocation call for container", "resourceName", resource, "containerName", contName, "podUID", podUID)
|
||||
resp, err := eI.e.getPreferredAllocation(available.UnsortedList(), mustInclude.UnsortedList(), size)
|
||||
m.mutex.Lock()
|
||||
if err != nil {
|
||||
@ -1167,7 +1167,7 @@ func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
|
||||
func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
|
||||
cntID, err := m.containerMap.GetContainerID(podUID, cntName)
|
||||
if err != nil {
|
||||
klog.V(4).InfoS("container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName, "err", err)
|
||||
klog.ErrorS(err, "Container not found in the initial map, assumed NOT running", "podUID", podUID, "containerName", cntName)
|
||||
return false
|
||||
}
|
||||
|
||||
@ -1175,11 +1175,11 @@ func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
|
||||
// so on kubelet restart containers will again fail admission, hitting https://github.com/kubernetes/kubernetes/issues/118559 again.
|
||||
// This scenario should however be rare enough.
|
||||
if !m.containerRunningSet.Has(cntID) {
|
||||
klog.V(4).InfoS("container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
|
||||
klog.V(4).InfoS("Container not present in the initial running set", "podUID", podUID, "containerName", cntName, "containerID", cntID)
|
||||
return false
|
||||
}
|
||||
|
||||
// Once we make it here we know we have a running container.
|
||||
klog.V(4).InfoS("container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
|
||||
klog.V(4).InfoS("Container found in the initial set, assumed running", "podUID", podUID, "containerName", cntName, "containerID", cntID)
|
||||
return true
|
||||
}
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/client.go
generated
vendored
@ -106,6 +106,8 @@ func (c *client) Disconnect() error {
|
||||
}
|
||||
c.mutex.Unlock()
|
||||
c.handler.PluginDisconnected(c.resource)
|
||||
|
||||
klog.V(2).InfoS("Device plugin disconnected", "resource", c.resource)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
9
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/handler.go
generated
vendored
9
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/handler.go
generated
vendored
@ -43,8 +43,8 @@ func (s *server) RegisterPlugin(pluginName string, endpoint string, versions []s
|
||||
return s.connectClient(pluginName, endpoint)
|
||||
}
|
||||
|
||||
func (s *server) DeRegisterPlugin(pluginName string) {
|
||||
klog.V(2).InfoS("Deregistering plugin", "plugin", pluginName)
|
||||
func (s *server) DeRegisterPlugin(pluginName, endpoint string) {
|
||||
klog.V(2).InfoS("Deregistering plugin", "plugin", pluginName, "endpoint", endpoint)
|
||||
client := s.getClient(pluginName)
|
||||
if client != nil {
|
||||
s.disconnectClient(pluginName, client)
|
||||
@ -62,6 +62,7 @@ func (s *server) ValidatePlugin(pluginName string, endpoint string, versions []s
|
||||
return fmt.Errorf("invalid name of device plugin socket: %s", fmt.Sprintf(errInvalidResourceName, pluginName))
|
||||
}
|
||||
|
||||
klog.V(2).InfoS("Device plugin validated", "plugin", pluginName, "endpoint", endpoint, "versions", versions)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -75,6 +76,7 @@ func (s *server) connectClient(name string, socketPath string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
klog.V(2).InfoS("Connected to new client", "resource", name)
|
||||
go func() {
|
||||
s.runClient(name, c)
|
||||
}()
|
||||
@ -86,7 +88,6 @@ func (s *server) disconnectClient(name string, c Client) error {
|
||||
s.deregisterClient(name)
|
||||
return c.Disconnect()
|
||||
}
|
||||
|
||||
func (s *server) registerClient(name string, c Client) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
@ -112,7 +113,7 @@ func (s *server) runClient(name string, c Client) {
|
||||
}
|
||||
|
||||
if err := s.disconnectClient(name, c); err != nil {
|
||||
klog.V(2).InfoS("Unable to disconnect client", "resource", name, "client", c, "err", err)
|
||||
klog.ErrorS(err, "Unable to disconnect client", "resource", name, "client", c)
|
||||
}
|
||||
}
|
||||
|
||||
|
11
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go
generated
vendored
11
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go
generated
vendored
@ -91,7 +91,7 @@ func (s *server) Start() error {
|
||||
|
||||
if selinux.GetEnabled() {
|
||||
if err := selinux.SetFileLabel(s.socketDir, config.KubeletPluginsDirSELinuxLabel); err != nil {
|
||||
klog.InfoS("Unprivileged containerized plugins might not work. Could not set selinux context on socket dir", "path", s.socketDir, "err", err)
|
||||
klog.ErrorS(err, "Unprivileged containerized plugins might not work. Could not set selinux context on socket dir", "path", s.socketDir)
|
||||
}
|
||||
}
|
||||
|
||||
@ -128,7 +128,7 @@ func (s *server) Start() error {
|
||||
func (s *server) Stop() error {
|
||||
s.visitClients(func(r string, c Client) {
|
||||
if err := s.disconnectClient(r, c); err != nil {
|
||||
klog.InfoS("Error disconnecting device plugin client", "resourceName", r, "err", err)
|
||||
klog.ErrorS(err, "Failed to disconnect device plugin client", "resourceName", r)
|
||||
}
|
||||
})
|
||||
|
||||
@ -145,6 +145,7 @@ func (s *server) Stop() error {
|
||||
// During kubelet termination, we do not need the registration server,
|
||||
// and we consider the kubelet to be healthy even when it is down.
|
||||
s.setHealthy()
|
||||
klog.V(2).InfoS("Stopping device plugin registration server")
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -159,18 +160,18 @@ func (s *server) Register(ctx context.Context, r *api.RegisterRequest) (*api.Emp
|
||||
|
||||
if !s.isVersionCompatibleWithPlugin(r.Version) {
|
||||
err := fmt.Errorf(errUnsupportedVersion, r.Version, api.SupportedVersions)
|
||||
klog.InfoS("Bad registration request from device plugin with resource", "resourceName", r.ResourceName, "err", err)
|
||||
klog.ErrorS(err, "Bad registration request from device plugin with resource", "resourceName", r.ResourceName)
|
||||
return &api.Empty{}, err
|
||||
}
|
||||
|
||||
if !v1helper.IsExtendedResourceName(core.ResourceName(r.ResourceName)) {
|
||||
err := fmt.Errorf(errInvalidResourceName, r.ResourceName)
|
||||
klog.InfoS("Bad registration request from device plugin", "err", err)
|
||||
klog.ErrorS(err, "Bad registration request from device plugin")
|
||||
return &api.Empty{}, err
|
||||
}
|
||||
|
||||
if err := s.connectClient(r.ResourceName, filepath.Join(s.socketDir, r.Endpoint)); err != nil {
|
||||
klog.InfoS("Error connecting to device plugin client", "err", err)
|
||||
klog.ErrorS(err, "Error connecting to device plugin client")
|
||||
return &api.Empty{}, err
|
||||
}
|
||||
|
||||
|
6
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go
generated
vendored
6
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/pod_devices.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package devicemanager
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"sync"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
@ -429,10 +430,7 @@ func NewResourceDeviceInstances() ResourceDeviceInstances {
|
||||
func (rdev ResourceDeviceInstances) Clone() ResourceDeviceInstances {
|
||||
clone := NewResourceDeviceInstances()
|
||||
for resourceName, resourceDevs := range rdev {
|
||||
clone[resourceName] = make(map[string]pluginapi.Device)
|
||||
for devID, dev := range resourceDevs {
|
||||
clone[resourceName][devID] = dev
|
||||
}
|
||||
clone[resourceName] = maps.Clone(resourceDevs)
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
16
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go
generated
vendored
16
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go
generated
vendored
@ -43,7 +43,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
|
||||
for resource, requested := range accumulatedResourceRequests {
|
||||
// Only consider devices that actually contain topology information.
|
||||
if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
|
||||
klog.InfoS("Resource does not have a topology preference", "resource", resource)
|
||||
klog.InfoS("Resource does not have a topology preference", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested)
|
||||
deviceHints[resource] = nil
|
||||
continue
|
||||
}
|
||||
@ -54,11 +54,11 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
|
||||
allocated := m.podDevices.containerDevices(string(pod.UID), container.Name, resource)
|
||||
if allocated.Len() > 0 {
|
||||
if allocated.Len() != requested {
|
||||
klog.ErrorS(nil, "Resource already allocated to pod with different number than request", "resource", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "allocated", allocated.Len())
|
||||
klog.InfoS("Resource already allocated to pod with different number than request", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "allocated", allocated.Len())
|
||||
deviceHints[resource] = []topologymanager.TopologyHint{}
|
||||
continue
|
||||
}
|
||||
klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
|
||||
continue
|
||||
}
|
||||
@ -67,7 +67,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
|
||||
available := m.getAvailableDevices(resource)
|
||||
reusable := m.devicesToReuse[string(pod.UID)][resource]
|
||||
if available.Union(reusable).Len() < requested {
|
||||
klog.ErrorS(nil, "Unable to generate topology hints: requested number of devices unavailable", "resource", resource, "request", requested, "available", available.Union(reusable).Len())
|
||||
klog.InfoS("Unable to generate topology hints: requested number of devices unavailable", "resourceName", resource, "pod", klog.KObj(pod), "containerName", container.Name, "request", requested, "available", available.Union(reusable).Len())
|
||||
deviceHints[resource] = []topologymanager.TopologyHint{}
|
||||
continue
|
||||
}
|
||||
@ -94,7 +94,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
|
||||
for resource, requested := range accumulatedResourceRequests {
|
||||
// Only consider devices that actually contain topology information.
|
||||
if aligned := m.deviceHasTopologyAlignment(resource); !aligned {
|
||||
klog.InfoS("Resource does not have a topology preference", "resource", resource)
|
||||
klog.InfoS("Resource does not have a topology preference", "resourceName", resource, "pod", klog.KObj(pod), "request", requested)
|
||||
deviceHints[resource] = nil
|
||||
continue
|
||||
}
|
||||
@ -105,11 +105,11 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
|
||||
allocated := m.podDevices.podDevices(string(pod.UID), resource)
|
||||
if allocated.Len() > 0 {
|
||||
if allocated.Len() != requested {
|
||||
klog.ErrorS(nil, "Resource already allocated to pod with different number than request", "resource", resource, "pod", klog.KObj(pod), "request", requested, "allocated", allocated.Len())
|
||||
klog.InfoS("Resource already allocated to pod with different number than request", "resourceName", resource, "pod", klog.KObj(pod), "request", requested, "allocated", allocated.Len())
|
||||
deviceHints[resource] = []topologymanager.TopologyHint{}
|
||||
continue
|
||||
}
|
||||
klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resource", resource, "pod", klog.KObj(pod))
|
||||
klog.InfoS("Regenerating TopologyHints for resource already allocated to pod", "resourceName", resource, "pod", klog.KObj(pod), "allocated", allocated.Len())
|
||||
deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.Set[string]{}, requested)
|
||||
continue
|
||||
}
|
||||
@ -117,7 +117,7 @@ func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymana
|
||||
// Get the list of available devices, for which TopologyHints should be generated.
|
||||
available := m.getAvailableDevices(resource)
|
||||
if available.Len() < requested {
|
||||
klog.ErrorS(nil, "Unable to generate topology hints: requested number of devices unavailable", "resource", resource, "request", requested, "available", available.Len())
|
||||
klog.InfoS("Unable to generate topology hints: requested number of devices unavailable", "resourceName", resource, "pod", klog.KObj(pod), "request", requested, "available", available.Len())
|
||||
deviceHints[resource] = []topologymanager.TopologyHint{}
|
||||
continue
|
||||
}
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/doc.go
generated
vendored
@ -18,4 +18,4 @@ limitations under the License.
|
||||
// to manage containers. For example, they contain functions to configure containers' cgroups,
|
||||
// ensure containers run with the desired QoS, and allocate compute resources like cpus, memory,
|
||||
// devices...
|
||||
package cm // import "k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
package cm
|
||||
|
15
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go
generated
vendored
15
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/manager.go
generated
vendored
@ -98,7 +98,20 @@ func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string, n
|
||||
}
|
||||
|
||||
func (m *ManagerImpl) GetWatcherHandler() cache.PluginHandler {
|
||||
return cache.PluginHandler(dra.NewRegistrationHandler(m.kubeClient, m.getNode))
|
||||
// The time that DRA drivers have to come back after being unregistered
|
||||
// before the kubelet removes their ResourceSlices.
|
||||
//
|
||||
// This must be long enough to actually allow stopping a pod and
|
||||
// starting the replacement (otherwise ResourceSlices get deleted
|
||||
// unnecessarily) and not too long (otherwise the time window were
|
||||
// pods might still get scheduled to the node after removal of a
|
||||
// driver is too long).
|
||||
//
|
||||
// 30 seconds might be long enough for a simple container restart.
|
||||
// If a DRA driver wants to be sure that slices don't get wiped,
|
||||
// it should use rolling updates.
|
||||
wipingDelay := 30 * time.Second
|
||||
return cache.PluginHandler(dra.NewRegistrationHandler(m.kubeClient, m.getNode, wipingDelay))
|
||||
}
|
||||
|
||||
// Start starts the reconcile loop of the manager.
|
||||
|
59
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go
generated
vendored
59
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/plugins_store.go
generated
vendored
@ -18,13 +18,16 @@ package plugin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// PluginsStore holds a list of DRA Plugins.
|
||||
type pluginsStore struct {
|
||||
sync.RWMutex
|
||||
store map[string]*Plugin
|
||||
// plugin name -> Plugin in the order in which they got added
|
||||
store map[string][]*Plugin
|
||||
}
|
||||
|
||||
// draPlugins map keeps track of all registered DRA plugins on the node
|
||||
@ -37,43 +40,57 @@ func (s *pluginsStore) get(pluginName string) *Plugin {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
return s.store[pluginName]
|
||||
instances := s.store[pluginName]
|
||||
if len(instances) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Heuristic: pick the most recent one. It's most likely
|
||||
// the newest, except when kubelet got restarted and registered
|
||||
// all running plugins in random order.
|
||||
return instances[len(instances)-1]
|
||||
}
|
||||
|
||||
// Set lets you save a DRA Plugin to the list and give it a specific name.
|
||||
// This method is protected by a mutex.
|
||||
func (s *pluginsStore) add(p *Plugin) (replacedPlugin *Plugin, replaced bool) {
|
||||
func (s *pluginsStore) add(p *Plugin) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if s.store == nil {
|
||||
s.store = make(map[string]*Plugin)
|
||||
s.store = make(map[string][]*Plugin)
|
||||
}
|
||||
|
||||
replacedPlugin, exists := s.store[p.name]
|
||||
s.store[p.name] = p
|
||||
|
||||
if replacedPlugin != nil && replacedPlugin.cancel != nil {
|
||||
replacedPlugin.cancel(errors.New("plugin got replaced"))
|
||||
for _, oldP := range s.store[p.name] {
|
||||
if oldP.endpoint == p.endpoint {
|
||||
// One plugin instance cannot hijack the endpoint of another instance.
|
||||
return fmt.Errorf("endpoint %s already registered for plugin %s", p.endpoint, p.name)
|
||||
}
|
||||
}
|
||||
|
||||
return replacedPlugin, exists
|
||||
s.store[p.name] = append(s.store[p.name], p)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete lets you delete a DRA Plugin by name.
|
||||
// This method is protected by a mutex.
|
||||
func (s *pluginsStore) delete(pluginName string) *Plugin {
|
||||
// remove lets you remove one endpoint for a DRA Plugin.
|
||||
// This method is protected by a mutex. It returns the
|
||||
// plugin if found and true if that was the last instance
|
||||
func (s *pluginsStore) remove(pluginName, endpoint string) (*Plugin, bool) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
p, exists := s.store[pluginName]
|
||||
if !exists {
|
||||
return nil
|
||||
instances := s.store[pluginName]
|
||||
i := slices.IndexFunc(instances, func(p *Plugin) bool { return p.endpoint == endpoint })
|
||||
if i == -1 {
|
||||
return nil, false
|
||||
}
|
||||
p := instances[i]
|
||||
last := len(instances) == 1
|
||||
if last {
|
||||
delete(s.store, pluginName)
|
||||
} else {
|
||||
s.store[pluginName] = slices.Delete(instances, i, i+1)
|
||||
}
|
||||
|
||||
if p.cancel != nil {
|
||||
p.cancel(errors.New("plugin got removed"))
|
||||
}
|
||||
delete(s.store, pluginName)
|
||||
|
||||
return p
|
||||
return p, last
|
||||
}
|
||||
|
124
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/registration.go
generated
vendored
124
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin/registration.go
generated
vendored
@ -21,6 +21,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@ -51,8 +52,22 @@ type RegistrationHandler struct {
|
||||
// This is necessary because it implements APIs which don't
|
||||
// provide a context.
|
||||
backgroundCtx context.Context
|
||||
cancel func(err error)
|
||||
kubeClient kubernetes.Interface
|
||||
getNode func() (*v1.Node, error)
|
||||
wipingDelay time.Duration
|
||||
|
||||
wg sync.WaitGroup
|
||||
mutex sync.Mutex
|
||||
|
||||
// pendingWipes maps a plugin name to a cancel function for
|
||||
// wiping of that plugin's ResourceSlices. Entries get added
|
||||
// in DeRegisterPlugin and check in RegisterPlugin. If
|
||||
// wiping is pending during RegisterPlugin, it gets canceled.
|
||||
//
|
||||
// Must use pointers to functions because the entries have to
|
||||
// be comparable.
|
||||
pendingWipes map[string]*context.CancelCauseFunc
|
||||
}
|
||||
|
||||
var _ cache.PluginHandler = &RegistrationHandler{}
|
||||
@ -62,12 +77,20 @@ var _ cache.PluginHandler = &RegistrationHandler{}
|
||||
// Must only be called once per process because it manages global state.
|
||||
// If a kubeClient is provided, then it synchronizes ResourceSlices
|
||||
// with the resource information provided by plugins.
|
||||
func NewRegistrationHandler(kubeClient kubernetes.Interface, getNode func() (*v1.Node, error)) *RegistrationHandler {
|
||||
func NewRegistrationHandler(kubeClient kubernetes.Interface, getNode func() (*v1.Node, error), wipingDelay time.Duration) *RegistrationHandler {
|
||||
// The context and thus logger should come from the caller.
|
||||
return newRegistrationHandler(context.TODO(), kubeClient, getNode, wipingDelay)
|
||||
}
|
||||
|
||||
func newRegistrationHandler(ctx context.Context, kubeClient kubernetes.Interface, getNode func() (*v1.Node, error), wipingDelay time.Duration) *RegistrationHandler {
|
||||
ctx, cancel := context.WithCancelCause(ctx)
|
||||
handler := &RegistrationHandler{
|
||||
// The context and thus logger should come from the caller.
|
||||
backgroundCtx: klog.NewContext(context.TODO(), klog.LoggerWithName(klog.TODO(), "DRA registration handler")),
|
||||
backgroundCtx: klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "DRA registration handler")),
|
||||
cancel: cancel,
|
||||
kubeClient: kubeClient,
|
||||
getNode: getNode,
|
||||
wipingDelay: wipingDelay,
|
||||
pendingWipes: make(map[string]*context.CancelCauseFunc),
|
||||
}
|
||||
|
||||
// When kubelet starts up, no DRA driver has registered yet. None of
|
||||
@ -77,19 +100,45 @@ func NewRegistrationHandler(kubeClient kubernetes.Interface, getNode func() (*v1
|
||||
// to start up.
|
||||
//
|
||||
// This has to run in the background.
|
||||
go handler.wipeResourceSlices("")
|
||||
handler.wg.Add(1)
|
||||
go func() {
|
||||
defer handler.wg.Done()
|
||||
|
||||
logger := klog.LoggerWithName(klog.FromContext(handler.backgroundCtx), "startup")
|
||||
ctx := klog.NewContext(handler.backgroundCtx, logger)
|
||||
handler.wipeResourceSlices(ctx, 0 /* no delay */, "" /* all drivers */)
|
||||
}()
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// Stop cancels any remaining background activities and blocks until all goroutines have stopped.
|
||||
func (h *RegistrationHandler) Stop() {
|
||||
h.cancel(errors.New("Stop was called"))
|
||||
h.wg.Wait()
|
||||
}
|
||||
|
||||
// wipeResourceSlices deletes ResourceSlices of the node, optionally just for a specific driver.
|
||||
func (h *RegistrationHandler) wipeResourceSlices(driver string) {
|
||||
// Wiping will delay for a while and can be canceled by canceling the context.
|
||||
func (h *RegistrationHandler) wipeResourceSlices(ctx context.Context, delay time.Duration, driver string) {
|
||||
if h.kubeClient == nil {
|
||||
return
|
||||
}
|
||||
ctx := h.backgroundCtx
|
||||
logger := klog.FromContext(ctx)
|
||||
|
||||
if delay != 0 {
|
||||
// Before we start deleting, give the driver time to bounce back.
|
||||
// Perhaps it got removed as part of a DaemonSet update and the
|
||||
// replacement pod is about to start.
|
||||
logger.V(4).Info("Starting to wait before wiping ResourceSlices", "delay", delay)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.V(4).Info("Aborting wiping of ResourceSlices", "reason", context.Cause(ctx))
|
||||
case <-time.After(delay):
|
||||
logger.V(4).Info("Starting to wipe ResourceSlices after waiting", "delay", delay)
|
||||
}
|
||||
}
|
||||
|
||||
backoff := wait.Backoff{
|
||||
Duration: time.Second,
|
||||
Factor: 2,
|
||||
@ -148,10 +197,10 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string,
|
||||
// into all log output related to the plugin.
|
||||
ctx := h.backgroundCtx
|
||||
logger := klog.FromContext(ctx)
|
||||
logger = klog.LoggerWithValues(logger, "pluginName", pluginName)
|
||||
logger = klog.LoggerWithValues(logger, "pluginName", pluginName, "endpoint", endpoint)
|
||||
ctx = klog.NewContext(ctx, logger)
|
||||
|
||||
logger.V(3).Info("Register new DRA plugin", "endpoint", endpoint)
|
||||
logger.V(3).Info("Register new DRA plugin")
|
||||
|
||||
chosenService, err := h.validateSupportedServices(pluginName, supportedServices)
|
||||
if err != nil {
|
||||
@ -179,9 +228,19 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string,
|
||||
|
||||
// Storing endpoint of newly registered DRA Plugin into the map, where plugin name will be the key
|
||||
// all other DRA components will be able to get the actual socket of DRA plugins by its name.
|
||||
if err := draPlugins.add(pluginInstance); err != nil {
|
||||
cancel(err)
|
||||
// No wrapping, the error already contains details.
|
||||
return err
|
||||
}
|
||||
|
||||
if oldPlugin, replaced := draPlugins.add(pluginInstance); replaced {
|
||||
logger.V(1).Info("DRA plugin already registered, the old plugin was replaced and will be forgotten by the kubelet till the next kubelet restart", "oldEndpoint", oldPlugin.endpoint)
|
||||
// Now cancel any pending ResourceSlice wiping for this plugin.
|
||||
// Only needs to be done once.
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
if cancel := h.pendingWipes[pluginName]; cancel != nil {
|
||||
(*cancel)(errors.New("new plugin instance registered"))
|
||||
delete(h.pendingWipes, pluginName)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -220,16 +279,53 @@ func (h *RegistrationHandler) validateSupportedServices(pluginName string, suppo
|
||||
|
||||
// DeRegisterPlugin is called when a plugin has removed its socket,
|
||||
// signaling it is no longer available.
|
||||
func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) {
|
||||
if p := draPlugins.delete(pluginName); p != nil {
|
||||
func (h *RegistrationHandler) DeRegisterPlugin(pluginName, endpoint string) {
|
||||
if p, last := draPlugins.remove(pluginName, endpoint); p != nil {
|
||||
// This logger includes endpoint and pluginName.
|
||||
logger := klog.FromContext(p.backgroundCtx)
|
||||
logger.V(3).Info("Deregister DRA plugin", "endpoint", p.endpoint)
|
||||
logger.V(3).Info("Deregister DRA plugin", "lastInstance", last)
|
||||
if !last {
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare for canceling the background wiping. This needs to run
|
||||
// in the context of the registration handler, the one from
|
||||
// the plugin is canceled.
|
||||
logger = klog.FromContext(h.backgroundCtx)
|
||||
logger = klog.LoggerWithName(logger, "driver-cleanup")
|
||||
logger = klog.LoggerWithValues(logger, "pluginName", pluginName)
|
||||
ctx, cancel := context.WithCancelCause(h.backgroundCtx)
|
||||
ctx = klog.NewContext(ctx, logger)
|
||||
|
||||
// Clean up the ResourceSlices for the deleted Plugin since it
|
||||
// may have died without doing so itself and might never come
|
||||
// back.
|
||||
go h.wipeResourceSlices(pluginName)
|
||||
//
|
||||
// May get canceled if the plugin comes back quickly enough
|
||||
// (see RegisterPlugin).
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
if cancel := h.pendingWipes[pluginName]; cancel != nil {
|
||||
(*cancel)(errors.New("plugin deregistered a second time"))
|
||||
}
|
||||
h.pendingWipes[pluginName] = &cancel
|
||||
|
||||
h.wg.Add(1)
|
||||
go func() {
|
||||
defer h.wg.Done()
|
||||
defer func() {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
// Cancel our own context, but remove it from the map only if it
|
||||
// is the current entry. Perhaps it already got replaced.
|
||||
cancel(errors.New("wiping done"))
|
||||
if h.pendingWipes[pluginName] == &cancel {
|
||||
delete(h.pendingWipes, pluginName)
|
||||
}
|
||||
}()
|
||||
h.wipeResourceSlices(ctx, h.wipingDelay, pluginName)
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
|
8
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_container_manager.go
generated
vendored
8
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/fake_container_manager.go
generated
vendored
@ -268,3 +268,11 @@ func (cm *FakeContainerManager) UpdateAllocatedResourcesStatus(pod *v1.Pod, stat
|
||||
func (cm *FakeContainerManager) Updates() <-chan resourceupdates.Update {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *FakeContainerManager) PodHasExclusiveCPUs(pod *v1.Pod) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (cm *FakeContainerManager) ContainerHasExclusiveCPUs(pod *v1.Pod, container *v1.Container) bool {
|
||||
return false
|
||||
}
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
libcontainercgroups "github.com/opencontainers/cgroups"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
|
11
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/memory_manager.go
generated
vendored
11
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/memory_manager.go
generated
vendored
@ -205,6 +205,7 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
|
||||
|
||||
m.allocatableMemory = m.policy.GetAllocatableMemory(m.state)
|
||||
|
||||
klog.V(4).InfoS("memorymanager started", "policy", m.policy.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -248,7 +249,7 @@ func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.
|
||||
}
|
||||
|
||||
if numaNodes.Len() == 0 {
|
||||
klog.V(5).InfoS("No allocation is available", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
klog.V(5).InfoS("NUMA nodes not available for allocation", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -266,7 +267,7 @@ func (m *manager) Allocate(pod *v1.Pod, container *v1.Container) error {
|
||||
|
||||
// Call down into the policy to assign this container memory if required.
|
||||
if err := m.policy.Allocate(m.state, pod, container); err != nil {
|
||||
klog.ErrorS(err, "Allocate error")
|
||||
klog.ErrorS(err, "Allocate error", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -280,7 +281,7 @@ func (m *manager) RemoveContainer(containerID string) error {
|
||||
// if error appears it means container entry already does not exist under the container map
|
||||
podUID, containerName, err := m.containerMap.GetContainerRef(containerID)
|
||||
if err != nil {
|
||||
klog.InfoS("Failed to get container from container map", "containerID", containerID, "err", err)
|
||||
klog.ErrorS(err, "Failed to get container from container map", "containerID", containerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -344,7 +345,7 @@ func (m *manager) removeStaleState() {
|
||||
for podUID := range assignments {
|
||||
for containerName := range assignments[podUID] {
|
||||
if _, ok := activeContainers[podUID][containerName]; !ok {
|
||||
klog.InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
|
||||
klog.V(2).InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
|
||||
m.policyRemoveContainerByRef(podUID, containerName)
|
||||
}
|
||||
}
|
||||
@ -352,7 +353,7 @@ func (m *manager) removeStaleState() {
|
||||
|
||||
m.containerMap.Visit(func(podUID, containerName, containerID string) {
|
||||
if _, ok := activeContainers[podUID][containerName]; !ok {
|
||||
klog.InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
|
||||
klog.V(2).InfoS("RemoveStaleState removing state", "podUID", podUID, "containerName", containerName)
|
||||
m.policyRemoveContainerByRef(podUID, containerName)
|
||||
}
|
||||
})
|
||||
|
43
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_static.go
generated
vendored
43
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/policy_static.go
generated
vendored
@ -96,7 +96,9 @@ func (p *staticPolicy) Start(s state.State) error {
|
||||
// Allocate call is idempotent
|
||||
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
|
||||
// allocate the memory only for guaranteed pods
|
||||
if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
|
||||
qos := v1qos.GetPodQOS(pod)
|
||||
if qos != v1.PodQOSGuaranteed {
|
||||
klog.V(5).InfoS("Exclusive memory allocation skipped, pod QoS is not guaranteed", "pod", klog.KObj(pod), "containerName", container.Name, "qos", qos)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -196,6 +198,7 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
|
||||
// TODO: we should refactor our state structs to reflect the amount of the re-used memory
|
||||
p.updateInitContainersMemoryBlocks(s, pod, container, containerBlocks)
|
||||
|
||||
klog.V(4).InfoS("Allocated exclusive memory", "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -304,24 +307,24 @@ func regenerateHints(pod *v1.Pod, ctn *v1.Container, ctnBlocks []state.Block, re
|
||||
}
|
||||
|
||||
if len(ctnBlocks) != len(reqRsrc) {
|
||||
klog.ErrorS(nil, "The number of requested resources by the container differs from the number of memory blocks", "containerName", ctn.Name)
|
||||
klog.InfoS("The number of requested resources by the container differs from the number of memory blocks", "pod", klog.KObj(pod), "containerName", ctn.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, b := range ctnBlocks {
|
||||
if _, ok := reqRsrc[b.Type]; !ok {
|
||||
klog.ErrorS(nil, "Container requested resources do not have resource of this type", "containerName", ctn.Name, "type", b.Type)
|
||||
klog.InfoS("Container requested resources but none available of this type", "pod", klog.KObj(pod), "containerName", ctn.Name, "type", b.Type)
|
||||
return nil
|
||||
}
|
||||
|
||||
if b.Size != reqRsrc[b.Type] {
|
||||
klog.ErrorS(nil, "Memory already allocated with different numbers than requested", "podUID", pod.UID, "type", b.Type, "containerName", ctn.Name, "requestedResource", reqRsrc[b.Type], "allocatedSize", b.Size)
|
||||
klog.InfoS("Memory already allocated with different numbers than requested", "pod", klog.KObj(pod), "containerName", ctn.Name, "type", b.Type, "requestedResource", reqRsrc[b.Type], "allocatedSize", b.Size)
|
||||
return nil
|
||||
}
|
||||
|
||||
containerNUMAAffinity, err := bitmask.NewBitMask(b.NUMAAffinity...)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Failed to generate NUMA bitmask")
|
||||
klog.ErrorS(err, "Failed to generate NUMA bitmask", "pod", klog.KObj(pod), "containerName", ctn.Name, "type", b.Type)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -447,7 +450,13 @@ func getRequestedResources(pod *v1.Pod, container *v1.Container) (map[v1.Resourc
|
||||
// We should return this value because this is what kubelet agreed to allocate for the container
|
||||
// and the value configured with runtime.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
|
||||
if cs, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, container.Name); ok {
|
||||
containerStatuses := pod.Status.ContainerStatuses
|
||||
if podutil.IsRestartableInitContainer(container) {
|
||||
if len(pod.Status.InitContainerStatuses) != 0 {
|
||||
containerStatuses = append(containerStatuses, pod.Status.InitContainerStatuses...)
|
||||
}
|
||||
}
|
||||
if cs, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
|
||||
resources = cs.AllocatedResources
|
||||
}
|
||||
}
|
||||
@ -654,36 +663,36 @@ func (p *staticPolicy) validateState(s state.State) error {
|
||||
|
||||
func areMachineStatesEqual(ms1, ms2 state.NUMANodeMap) bool {
|
||||
if len(ms1) != len(ms2) {
|
||||
klog.ErrorS(nil, "Node states are different", "lengthNode1", len(ms1), "lengthNode2", len(ms2))
|
||||
klog.InfoS("Node states were different", "lengthNode1", len(ms1), "lengthNode2", len(ms2))
|
||||
return false
|
||||
}
|
||||
|
||||
for nodeID, nodeState1 := range ms1 {
|
||||
nodeState2, ok := ms2[nodeID]
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Node state does not have node ID", "nodeID", nodeID)
|
||||
klog.InfoS("Node state didn't have node ID", "nodeID", nodeID)
|
||||
return false
|
||||
}
|
||||
|
||||
if nodeState1.NumberOfAssignments != nodeState2.NumberOfAssignments {
|
||||
klog.ErrorS(nil, "Node states number of assignments are different", "assignment1", nodeState1.NumberOfAssignments, "assignment2", nodeState2.NumberOfAssignments)
|
||||
klog.InfoS("Node state had a different number of memory assignments.", "assignment1", nodeState1.NumberOfAssignments, "assignment2", nodeState2.NumberOfAssignments)
|
||||
return false
|
||||
}
|
||||
|
||||
if !areGroupsEqual(nodeState1.Cells, nodeState2.Cells) {
|
||||
klog.ErrorS(nil, "Node states groups are different", "stateNode1", nodeState1.Cells, "stateNode2", nodeState2.Cells)
|
||||
klog.InfoS("Node states had different groups", "stateNode1", nodeState1.Cells, "stateNode2", nodeState2.Cells)
|
||||
return false
|
||||
}
|
||||
|
||||
if len(nodeState1.MemoryMap) != len(nodeState2.MemoryMap) {
|
||||
klog.ErrorS(nil, "Node states memory map have different lengths", "lengthNode1", len(nodeState1.MemoryMap), "lengthNode2", len(nodeState2.MemoryMap))
|
||||
klog.InfoS("Node state had memory maps of different lengths", "lengthNode1", len(nodeState1.MemoryMap), "lengthNode2", len(nodeState2.MemoryMap))
|
||||
return false
|
||||
}
|
||||
|
||||
for resourceName, memoryState1 := range nodeState1.MemoryMap {
|
||||
memoryState2, ok := nodeState2.MemoryMap[resourceName]
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Memory state does not have resource", "resource", resourceName)
|
||||
klog.InfoS("Memory state didn't have resource", "resource", resourceName)
|
||||
return false
|
||||
}
|
||||
|
||||
@ -701,11 +710,11 @@ func areMachineStatesEqual(ms1, ms2 state.NUMANodeMap) bool {
|
||||
}
|
||||
|
||||
if tmpState1.Free != tmpState2.Free {
|
||||
klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "free", "free1", tmpState1.Free, "free2", tmpState2.Free, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
klog.InfoS("NUMA node and resource had different memory states", "node", nodeID, "resource", resourceName, "field", "free", "free1", tmpState1.Free, "free2", tmpState2.Free, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
return false
|
||||
}
|
||||
if tmpState1.Reserved != tmpState2.Reserved {
|
||||
klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "reserved", "reserved1", tmpState1.Reserved, "reserved2", tmpState2.Reserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
klog.InfoS("NUMA node and resource had different memory states", "node", nodeID, "resource", resourceName, "field", "reserved", "reserved1", tmpState1.Reserved, "reserved2", tmpState2.Reserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@ -715,17 +724,17 @@ func areMachineStatesEqual(ms1, ms2 state.NUMANodeMap) bool {
|
||||
|
||||
func areMemoryStatesEqual(memoryState1, memoryState2 *state.MemoryTable, nodeID int, resourceName v1.ResourceName) bool {
|
||||
if memoryState1.TotalMemSize != memoryState2.TotalMemSize {
|
||||
klog.ErrorS(nil, "Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "TotalMemSize", "TotalMemSize1", memoryState1.TotalMemSize, "TotalMemSize2", memoryState2.TotalMemSize, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "TotalMemSize", "TotalMemSize1", memoryState1.TotalMemSize, "TotalMemSize2", memoryState2.TotalMemSize, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
return false
|
||||
}
|
||||
|
||||
if memoryState1.SystemReserved != memoryState2.SystemReserved {
|
||||
klog.ErrorS(nil, "Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "SystemReserved", "SystemReserved1", memoryState1.SystemReserved, "SystemReserved2", memoryState2.SystemReserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "SystemReserved", "SystemReserved1", memoryState1.SystemReserved, "SystemReserved2", memoryState2.SystemReserved, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
return false
|
||||
}
|
||||
|
||||
if memoryState1.Allocatable != memoryState2.Allocatable {
|
||||
klog.ErrorS(nil, "Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "Allocatable", "Allocatable1", memoryState1.Allocatable, "Allocatable2", memoryState2.Allocatable, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
klog.InfoS("Memory states for the NUMA node and resource are different", "node", nodeID, "resource", resourceName, "field", "Allocatable", "Allocatable1", memoryState1.Allocatable, "Allocatable2", memoryState2.Allocatable, "memoryState1", *memoryState1, "memoryState2", *memoryState2)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
|
10
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go
generated
vendored
10
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_checkpoint.go
generated
vendored
@ -131,7 +131,7 @@ func (sc *stateCheckpoint) SetMachineState(memoryMap NUMANodeMap) {
|
||||
sc.cache.SetMachineState(memoryMap)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
@ -143,7 +143,7 @@ func (sc *stateCheckpoint) SetMemoryBlocks(podUID string, containerName string,
|
||||
sc.cache.SetMemoryBlocks(podUID, containerName, blocks)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -155,7 +155,7 @@ func (sc *stateCheckpoint) SetMemoryAssignments(assignments ContainerMemoryAssig
|
||||
sc.cache.SetMemoryAssignments(assignments)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint")
|
||||
}
|
||||
}
|
||||
|
||||
@ -167,7 +167,7 @@ func (sc *stateCheckpoint) Delete(podUID string, containerName string) {
|
||||
sc.cache.Delete(podUID, containerName)
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint", "podUID", podUID, "containerName", containerName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -179,6 +179,6 @@ func (sc *stateCheckpoint) ClearState() {
|
||||
sc.cache.ClearState()
|
||||
err := sc.storeState()
|
||||
if err != nil {
|
||||
klog.InfoS("Store state to checkpoint error", "err", err)
|
||||
klog.ErrorS(err, "Failed to store state to checkpoint")
|
||||
}
|
||||
}
|
||||
|
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_mem.go
generated
vendored
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state/state_mem.go
generated
vendored
@ -94,6 +94,7 @@ func (s *stateMemory) SetMemoryAssignments(assignments ContainerMemoryAssignment
|
||||
defer s.Unlock()
|
||||
|
||||
s.assignments = assignments.Clone()
|
||||
klog.V(5).InfoS("Updated Memory assignments", "assignments", assignments)
|
||||
}
|
||||
|
||||
// Delete deletes corresponding Blocks from ContainerMemoryAssignments
|
||||
|
11
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go
generated
vendored
11
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/pod_container_manager_linux.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
libcontainercgroups "github.com/opencontainers/cgroups"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
@ -55,6 +55,8 @@ type podContainerManagerImpl struct {
|
||||
// cpuCFSQuotaPeriod is the cfs period value, cfs_period_us, setting per
|
||||
// node for all containers in usec
|
||||
cpuCFSQuotaPeriod uint64
|
||||
// podContainerManager is the ContainerManager running on the machine
|
||||
podContainerManager ContainerManager
|
||||
}
|
||||
|
||||
// Make sure that podContainerManagerImpl implements the PodContainerManager interface
|
||||
@ -73,6 +75,11 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
|
||||
// check if container already exist
|
||||
alreadyExists := m.Exists(pod)
|
||||
if !alreadyExists {
|
||||
enforceCPULimits := m.enforceCPULimits
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DisableCPUQuotaWithExclusiveCPUs) && m.podContainerManager.PodHasExclusiveCPUs(pod) {
|
||||
klog.V(2).InfoS("Disabled CFS quota", "pod", klog.KObj(pod))
|
||||
enforceCPULimits = false
|
||||
}
|
||||
enforceMemoryQoS := false
|
||||
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryQoS) &&
|
||||
libcontainercgroups.IsCgroup2UnifiedMode() {
|
||||
@ -82,7 +89,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
|
||||
podContainerName, _ := m.GetPodContainerName(pod)
|
||||
containerConfig := &CgroupConfig{
|
||||
Name: podContainerName,
|
||||
ResourceParameters: ResourceConfigForPod(pod, m.enforceCPULimits, m.cpuCFSQuotaPeriod, enforceMemoryQoS),
|
||||
ResourceParameters: ResourceConfigForPod(pod, enforceCPULimits, m.cpuCFSQuotaPeriod, enforceMemoryQoS),
|
||||
}
|
||||
if m.podPidsLimit > 0 {
|
||||
containerConfig.ResourceParameters.PidsLimit = &m.podPidsLimit
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/qos_container_manager_linux.go
generated
vendored
@ -29,7 +29,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
units "github.com/docker/go-units"
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
libcontainercgroups "github.com/opencontainers/cgroups"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
|
||||
"k8s.io/component-helpers/resource"
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/fake_topology_manager.go
generated
vendored
@ -45,7 +45,7 @@ func NewFakeManagerWithHint(hint *TopologyHint) Manager {
|
||||
|
||||
// NewFakeManagerWithPolicy returns an instance of fake topology manager with specified policy
|
||||
func NewFakeManagerWithPolicy(policy Policy) Manager {
|
||||
klog.InfoS("NewFakeManagerWithPolicy")
|
||||
klog.InfoS("NewFakeManagerWithPolicy", "policy", policy.Name())
|
||||
return &fakeManager{
|
||||
policy: policy,
|
||||
}
|
||||
|
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_options.go
generated
vendored
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/policy_options.go
generated
vendored
@ -47,11 +47,11 @@ func CheckPolicyOptionAvailable(option string) error {
|
||||
}
|
||||
|
||||
if alphaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManagerPolicyAlphaOptions) {
|
||||
return fmt.Errorf("Topology Manager Policy Alpha-level Options not enabled, but option %q provided", option)
|
||||
return fmt.Errorf("topology manager policy alpha-level options not enabled, but option %q provided", option)
|
||||
}
|
||||
|
||||
if betaOptions.Has(option) && !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManagerPolicyBetaOptions) {
|
||||
return fmt.Errorf("Topology Manager Policy Beta-level Options not enabled, but option %q provided", option)
|
||||
return fmt.Errorf("topology manager policy beta-level options not enabled, but option %q provided", option)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
6
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go
generated
vendored
6
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_container.go
generated
vendored
@ -50,6 +50,9 @@ func (s *containerScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
|
||||
klog.InfoS("Best TopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
|
||||
if !admit {
|
||||
if IsAlignmentGuaranteed(s.policy) {
|
||||
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Inc()
|
||||
}
|
||||
metrics.TopologyManagerAdmissionErrorsTotal.Inc()
|
||||
return admission.GetPodAdmitResult(&TopologyAffinityError{})
|
||||
}
|
||||
@ -63,6 +66,7 @@ func (s *containerScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
|
||||
}
|
||||
|
||||
if IsAlignmentGuaranteed(s.policy) {
|
||||
klog.V(4).InfoS("Resource alignment at container scope guaranteed", "pod", klog.KObj(pod))
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Inc()
|
||||
}
|
||||
}
|
||||
@ -84,6 +88,6 @@ func (s *containerScope) accumulateProvidersHints(pod *v1.Pod, container *v1.Con
|
||||
func (s *containerScope) calculateAffinity(pod *v1.Pod, container *v1.Container) (TopologyHint, bool) {
|
||||
providersHints := s.accumulateProvidersHints(pod, container)
|
||||
bestHint, admit := s.policy.Merge(providersHints)
|
||||
klog.InfoS("ContainerTopologyHint", "bestHint", bestHint)
|
||||
klog.InfoS("ContainerTopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod), "containerName", container.Name)
|
||||
return bestHint, admit
|
||||
}
|
||||
|
7
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go
generated
vendored
7
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/scope_pod.go
generated
vendored
@ -48,6 +48,10 @@ func (s *podScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
|
||||
bestHint, admit := s.calculateAffinity(pod)
|
||||
klog.InfoS("Best TopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod))
|
||||
if !admit {
|
||||
if IsAlignmentGuaranteed(s.policy) {
|
||||
// increment only if we know we allocate aligned resources.
|
||||
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Inc()
|
||||
}
|
||||
metrics.TopologyManagerAdmissionErrorsTotal.Inc()
|
||||
return admission.GetPodAdmitResult(&TopologyAffinityError{})
|
||||
}
|
||||
@ -64,6 +68,7 @@ func (s *podScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
|
||||
}
|
||||
if IsAlignmentGuaranteed(s.policy) {
|
||||
// increment only if we know we allocate aligned resources.
|
||||
klog.V(4).InfoS("Resource alignment at pod scope guaranteed", "pod", klog.KObj(pod))
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Inc()
|
||||
}
|
||||
return admission.GetPodAdmitResult(nil)
|
||||
@ -84,6 +89,6 @@ func (s *podScope) accumulateProvidersHints(pod *v1.Pod) []map[string][]Topology
|
||||
func (s *podScope) calculateAffinity(pod *v1.Pod) (TopologyHint, bool) {
|
||||
providersHints := s.accumulateProvidersHints(pod)
|
||||
bestHint, admit := s.policy.Merge(providersHints)
|
||||
klog.InfoS("PodTopologyHint", "bestHint", bestHint)
|
||||
klog.InfoS("PodTopologyHint", "bestHint", bestHint, "pod", klog.KObj(pod))
|
||||
return bestHint, admit
|
||||
}
|
||||
|
12
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go
generated
vendored
12
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go
generated
vendored
@ -188,9 +188,19 @@ func NewManager(topology []cadvisorapi.Node, topologyPolicyName string, topology
|
||||
scope: scope,
|
||||
}
|
||||
|
||||
manager.initializeMetrics()
|
||||
|
||||
return manager, nil
|
||||
}
|
||||
|
||||
func (m *manager) initializeMetrics() {
|
||||
// ensure the values exist
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Add(0)
|
||||
metrics.ContainerAlignedComputeResources.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Add(0)
|
||||
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopeContainer, metrics.AlignedNUMANode).Add(0)
|
||||
metrics.ContainerAlignedComputeResourcesFailure.WithLabelValues(metrics.AlignScopePod, metrics.AlignedNUMANode).Add(0)
|
||||
}
|
||||
|
||||
func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint {
|
||||
return m.scope.GetAffinity(podUID, containerName)
|
||||
}
|
||||
@ -212,11 +222,13 @@ func (m *manager) RemoveContainer(containerID string) error {
|
||||
}
|
||||
|
||||
func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
|
||||
klog.V(4).InfoS("Topology manager admission check", "pod", klog.KObj(attrs.Pod))
|
||||
metrics.TopologyManagerAdmissionRequestsTotal.Inc()
|
||||
|
||||
startTime := time.Now()
|
||||
podAdmitResult := m.scope.Admit(attrs.Pod)
|
||||
metrics.TopologyManagerAdmissionDuration.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
klog.V(4).InfoS("Pod Admit Result", "Message", podAdmitResult.Message, "pod", klog.KObj(attrs.Pod))
|
||||
return podAdmitResult
|
||||
}
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_linux.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/cm/util/cgroups_linux.go
generated
vendored
@ -21,7 +21,7 @@ import (
|
||||
|
||||
libcontainerutils "k8s.io/kubernetes/third_party/forked/libcontainer/utils"
|
||||
|
||||
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
libcontainercgroups "github.com/opencontainers/cgroups"
|
||||
)
|
||||
|
||||
const (
|
||||
|
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go
generated
vendored
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/config.go
generated
vendored
@ -456,6 +456,7 @@ func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGr
|
||||
existing.Labels = ref.Labels
|
||||
existing.DeletionTimestamp = ref.DeletionTimestamp
|
||||
existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds
|
||||
existing.Generation = ref.Generation
|
||||
existing.Status = ref.Status
|
||||
updateAnnotations(existing, ref)
|
||||
|
||||
|
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go
generated
vendored
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/defaults.go
generated
vendored
@ -30,4 +30,5 @@ const (
|
||||
KubeletPluginsDirSELinuxLabel = "system_u:object_r:container_file_t:s0"
|
||||
KubeletContainersSharedSELinuxLabel = "system_u:object_r:container_file_t:s0"
|
||||
DefaultKubeletCheckpointsDirName = "checkpoints"
|
||||
DefaultKubeletUserNamespacesIDsPerPod = 65536
|
||||
)
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/config/doc.go
generated
vendored
@ -15,4 +15,4 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// Package config implements the pod configuration readers.
|
||||
package config // import "k8s.io/kubernetes/pkg/kubelet/config"
|
||||
package config
|
||||
|
25
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
generated
vendored
25
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go
generated
vendored
@ -396,6 +396,8 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) {
|
||||
|
||||
// HasAnyRegularContainerStarted returns true if any regular container has
|
||||
// started, which indicates all init containers have been initialized.
|
||||
// Deprecated: This function is not accurate when its pod sandbox is recreated.
|
||||
// Use HasAnyActiveRegularContainerStarted instead.
|
||||
func HasAnyRegularContainerStarted(spec *v1.PodSpec, statuses []v1.ContainerStatus) bool {
|
||||
if len(statuses) == 0 {
|
||||
return false
|
||||
@ -417,3 +419,26 @@ func HasAnyRegularContainerStarted(spec *v1.PodSpec, statuses []v1.ContainerStat
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// HasAnyActiveRegularContainerStarted returns true if any regular container of
|
||||
// the current pod sandbox has started, which indicates all init containers
|
||||
// have been initialized.
|
||||
func HasAnyActiveRegularContainerStarted(spec *v1.PodSpec, podStatus *PodStatus) bool {
|
||||
if podStatus == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
containerNames := sets.New[string]()
|
||||
for _, c := range spec.Containers {
|
||||
containerNames.Insert(c.Name)
|
||||
}
|
||||
|
||||
for _, status := range podStatus.ActiveContainerStatuses {
|
||||
if !containerNames.Has(status.Name) {
|
||||
continue
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
19
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go
generated
vendored
19
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go
generated
vendored
@ -33,6 +33,8 @@ import (
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/credentialprovider"
|
||||
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
@ -137,6 +139,9 @@ type Runtime interface {
|
||||
ListPodSandboxMetrics(ctx context.Context) ([]*runtimeapi.PodSandboxMetrics, error)
|
||||
// GetContainerStatus returns the status for the container.
|
||||
GetContainerStatus(ctx context.Context, id ContainerID) (*Status, error)
|
||||
// GetContainerSwapBehavior reports whether a container could be swappable.
|
||||
// This is used to decide whether to handle InPlacePodVerticalScaling for containers.
|
||||
GetContainerSwapBehavior(pod *v1.Pod, container *v1.Container) kubelettypes.SwapBehavior
|
||||
}
|
||||
|
||||
// StreamingRuntime is the interface implemented by runtimes that handle the serving of the
|
||||
@ -151,8 +156,11 @@ type StreamingRuntime interface {
|
||||
// ImageService interfaces allows to work with image service.
|
||||
type ImageService interface {
|
||||
// PullImage pulls an image from the network to local storage using the supplied
|
||||
// secrets if necessary. It returns a reference (digest or ID) to the pulled image.
|
||||
PullImage(ctx context.Context, image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
|
||||
// secrets if necessary.
|
||||
// It returns a reference (digest or ID) to the pulled image and the credentials
|
||||
// that were used to pull the image. If the returned credentials are nil, the
|
||||
// pull was anonymous.
|
||||
PullImage(ctx context.Context, image ImageSpec, credentials []credentialprovider.TrackedAuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, *credentialprovider.TrackedAuthConfig, error)
|
||||
// GetImageRef gets the reference (digest or ID) of the image which has already been in
|
||||
// the local storage. It returns ("", nil) if the image isn't in the local storage.
|
||||
GetImageRef(ctx context.Context, image ImageSpec) (string, error)
|
||||
@ -317,6 +325,8 @@ type PodStatus struct {
|
||||
IPs []string
|
||||
// Status of containers in the pod.
|
||||
ContainerStatuses []*Status
|
||||
// Statuses of containers of the active sandbox in the pod.
|
||||
ActiveContainerStatuses []*Status
|
||||
// Status of the pod sandbox.
|
||||
// Only for kuberuntime now, other runtime may keep it nil.
|
||||
SandboxStatuses []*runtimeapi.PodSandboxStatus
|
||||
@ -378,6 +388,8 @@ type Status struct {
|
||||
User *ContainerUser
|
||||
// Mounts are the volume mounts of the container
|
||||
Mounts []Mount
|
||||
// StopSignal is used to show the container's effective stop signal in the Status
|
||||
StopSignal *v1.Signal
|
||||
}
|
||||
|
||||
// ContainerUser represents user identity information
|
||||
@ -472,6 +484,9 @@ type Mount struct {
|
||||
Propagation runtimeapi.MountPropagation
|
||||
// Image is set if an OCI volume as image ID or digest should get mounted (special case).
|
||||
Image *runtimeapi.ImageSpec
|
||||
// ImageSubPath is set if an image volume sub path should get mounted. This
|
||||
// field is only required if the above Image is set.
|
||||
ImageSubPath string
|
||||
}
|
||||
|
||||
// ImageVolumes is a map of image specs by volume name.
|
||||
|
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go
generated
vendored
4
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/container/sync_result.go
generated
vendored
@ -45,6 +45,8 @@ var (
|
||||
ErrConfigPodSandbox = errors.New("ConfigPodSandboxError")
|
||||
// ErrKillPodSandbox returned when runtime failed to stop pod's sandbox.
|
||||
ErrKillPodSandbox = errors.New("KillPodSandboxError")
|
||||
// ErrResizePodInPlace returned when runtime failed to resize a pod.
|
||||
ErrResizePodInPlace = errors.New("ResizePodInPlaceError")
|
||||
)
|
||||
|
||||
// SyncAction indicates different kind of actions in SyncPod() and KillPod(). Now there are only actions
|
||||
@ -68,6 +70,8 @@ const (
|
||||
ConfigPodSandbox SyncAction = "ConfigPodSandbox"
|
||||
// KillPodSandbox action
|
||||
KillPodSandbox SyncAction = "KillPodSandbox"
|
||||
// ResizePodInPlace action is included whenever any containers in the pod are resized without restart
|
||||
ResizePodInPlace SyncAction = "ResizePodInPlace"
|
||||
)
|
||||
|
||||
// SyncResult is the result of sync action.
|
||||
|
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go
generated
vendored
1
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/events/event.go
generated
vendored
@ -61,6 +61,7 @@ const (
|
||||
VolumeResizeFailed = "VolumeResizeFailed"
|
||||
VolumeResizeSuccess = "VolumeResizeSuccessful"
|
||||
FileSystemResizeFailed = "FileSystemResizeFailed"
|
||||
VolumePermissionChangeInProgress = "VolumePermissionChangeInProgress"
|
||||
FileSystemResizeSuccess = "FileSystemResizeSuccessful"
|
||||
FailedMapVolume = "FailedMapVolume"
|
||||
WarnAlreadyMountedVolume = "AlreadyMountedVolume"
|
||||
|
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go
generated
vendored
2
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/doc.go
generated
vendored
@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// Package lifecycle contains handlers for pod lifecycle events and interfaces
|
||||
// to integrate with kubelet admission, synchronization, and eviction of pods.
|
||||
package lifecycle // import "k8s.io/kubernetes/pkg/kubelet/lifecycle"
|
||||
package lifecycle
|
||||
|
72
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go
generated
vendored
72
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate.go
generated
vendored
@ -22,15 +22,16 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/component-helpers/scheduling/corev1"
|
||||
"k8s.io/klog/v2"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler"
|
||||
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -52,6 +53,11 @@ const (
|
||||
// than Always for some of its init containers.
|
||||
InitContainerRestartPolicyForbidden = "InitContainerRestartPolicyForbidden"
|
||||
|
||||
// SupplementalGroupsPolicyNotSupported is used to denote that the pod was
|
||||
// rejected admission to the node because the node does not support
|
||||
// the pod's SupplementalGroupsPolicy.
|
||||
SupplementalGroupsPolicyNotSupported = "SupplementalGroupsPolicyNotSupported"
|
||||
|
||||
// UnexpectedAdmissionError is used to denote that the pod was rejected
|
||||
// admission to the node because of an error during admission that could not
|
||||
// be categorized.
|
||||
@ -135,25 +141,20 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
|
||||
}
|
||||
}
|
||||
|
||||
if rejectPodAdmissionBasedOnSupplementalGroupsPolicy(admitPod, node) {
|
||||
message := fmt.Sprintf("SupplementalGroupsPolicy=%s is not supported in this node", v1.SupplementalGroupsPolicyStrict)
|
||||
klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
|
||||
return PodAdmitResult{
|
||||
Admit: false,
|
||||
Reason: SupplementalGroupsPolicyNotSupported,
|
||||
Message: message,
|
||||
}
|
||||
}
|
||||
|
||||
pods := attrs.OtherPods
|
||||
nodeInfo := schedulerframework.NewNodeInfo(pods...)
|
||||
nodeInfo.SetNode(node)
|
||||
|
||||
// TODO: Remove this after the SidecarContainers feature gate graduates to GA.
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
|
||||
for _, c := range admitPod.Spec.InitContainers {
|
||||
if podutil.IsRestartableInitContainer(&c) {
|
||||
message := fmt.Sprintf("Init container %q may not have a non-default restartPolicy", c.Name)
|
||||
klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
|
||||
return PodAdmitResult{
|
||||
Admit: false,
|
||||
Reason: InitContainerRestartPolicyForbidden,
|
||||
Message: message,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ensure the node has enough plugin resources for that required in pods
|
||||
if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
|
||||
message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err)
|
||||
@ -272,6 +273,45 @@ func rejectPodAdmissionBasedOnOSField(pod *v1.Pod) bool {
|
||||
return string(pod.Spec.OS.Name) != runtime.GOOS
|
||||
}
|
||||
|
||||
// rejectPodAdmissionBasedOnSupplementalGroupsPolicy rejects pod only if
|
||||
// - the feature is beta or above, and SupplementalPolicy=Strict is set in the pod
|
||||
// - but, the node does not support the feature
|
||||
//
|
||||
// Note: During the feature is alpha or before(not yet released) in emulated version,
|
||||
// it should admit for backward compatibility
|
||||
func rejectPodAdmissionBasedOnSupplementalGroupsPolicy(pod *v1.Pod, node *v1.Node) bool {
|
||||
admit, reject := false, true // just for readability
|
||||
|
||||
inUse := (pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SupplementalGroupsPolicy != nil)
|
||||
if !inUse {
|
||||
return admit
|
||||
}
|
||||
|
||||
isBetaOrAbove := false
|
||||
if featureSpec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[features.SupplementalGroupsPolicy]; ok {
|
||||
isBetaOrAbove = (featureSpec.PreRelease == featuregate.Beta) || (featureSpec.PreRelease == featuregate.GA)
|
||||
}
|
||||
|
||||
if !isBetaOrAbove {
|
||||
return admit
|
||||
}
|
||||
|
||||
featureSupportedOnNode := ptr.Deref(
|
||||
ptr.Deref(node.Status.Features, v1.NodeFeatures{SupplementalGroupsPolicy: ptr.To(false)}).SupplementalGroupsPolicy,
|
||||
false,
|
||||
)
|
||||
effectivePolicy := ptr.Deref(
|
||||
pod.Spec.SecurityContext.SupplementalGroupsPolicy,
|
||||
v1.SupplementalGroupsPolicyMerge,
|
||||
)
|
||||
|
||||
if effectivePolicy == v1.SupplementalGroupsPolicyStrict && !featureSupportedOnNode {
|
||||
return reject
|
||||
}
|
||||
|
||||
return admit
|
||||
}
|
||||
|
||||
func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod {
|
||||
filterExtendedResources := func(containers []v1.Container) {
|
||||
for i, c := range containers {
|
||||
|
70
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go
generated
vendored
70
e2e/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go
generated
vendored
@ -113,6 +113,7 @@ const (
|
||||
CPUManagerPinningErrorsTotalKey = "cpu_manager_pinning_errors_total"
|
||||
CPUManagerSharedPoolSizeMilliCoresKey = "cpu_manager_shared_pool_size_millicores"
|
||||
CPUManagerExclusiveCPUsAllocationCountKey = "cpu_manager_exclusive_cpu_allocation_count"
|
||||
CPUManagerAllocationPerNUMAKey = "cpu_manager_allocation_per_numa"
|
||||
|
||||
// Metrics to track the Memory manager behavior
|
||||
MemoryManagerPinningRequestsTotalKey = "memory_manager_pinning_requests_total"
|
||||
@ -132,6 +133,7 @@ const (
|
||||
|
||||
// Metric for tracking aligment of compute resources
|
||||
ContainerAlignedComputeResourcesNameKey = "container_aligned_compute_resources_count"
|
||||
ContainerAlignedComputeResourcesFailureNameKey = "container_aligned_compute_resources_failure_count"
|
||||
ContainerAlignedComputeResourcesScopeLabelKey = "scope"
|
||||
ContainerAlignedComputeResourcesBoundaryLabelKey = "boundary"
|
||||
|
||||
@ -149,9 +151,15 @@ const (
|
||||
|
||||
AlignedPhysicalCPU = "physical_cpu"
|
||||
AlignedNUMANode = "numa_node"
|
||||
AlignedUncoreCache = "uncore_cache"
|
||||
|
||||
// Metrics to track kubelet admission rejections.
|
||||
AdmissionRejectionsTotalKey = "admission_rejections_total"
|
||||
|
||||
// Image Volume metrics
|
||||
ImageVolumeRequestedTotalKey = "image_volume_requested_total"
|
||||
ImageVolumeMountedSucceedTotalKey = "image_volume_mounted_succeed_total"
|
||||
ImageVolumeMountedErrorsTotalKey = "image_volume_mounted_errors_total"
|
||||
)
|
||||
|
||||
type imageSizeBucket struct {
|
||||
@ -808,6 +816,17 @@ var (
|
||||
},
|
||||
)
|
||||
|
||||
// CPUManagerAllocationPerNUMA tracks the count of CPUs allocated per NUMA node
|
||||
CPUManagerAllocationPerNUMA = metrics.NewGaugeVec(
|
||||
&metrics.GaugeOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: CPUManagerAllocationPerNUMAKey,
|
||||
Help: "Number of CPUs allocated per NUMA node",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{AlignedNUMANode},
|
||||
)
|
||||
|
||||
// ContainerAlignedComputeResources reports the count of resources allocation which granted aligned resources, per alignment boundary
|
||||
ContainerAlignedComputeResources = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
@ -818,7 +837,18 @@ var (
|
||||
},
|
||||
[]string{ContainerAlignedComputeResourcesScopeLabelKey, ContainerAlignedComputeResourcesBoundaryLabelKey},
|
||||
)
|
||||
// MemoryManagerPinningRequestTotal tracks the number of times the pod spec required the memory manager to pin memory pages
|
||||
|
||||
// ContainerAlignedComputeResourcesFailure reports the count of resources allocation attempts which failed to align resources, per alignment boundary
|
||||
ContainerAlignedComputeResourcesFailure = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: ContainerAlignedComputeResourcesFailureNameKey,
|
||||
Help: "Cumulative number of failures to allocate aligned compute resources to containers by alignment type.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
[]string{ContainerAlignedComputeResourcesScopeLabelKey, ContainerAlignedComputeResourcesBoundaryLabelKey},
|
||||
)
|
||||
|
||||
MemoryManagerPinningRequestTotal = metrics.NewCounter(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
@ -1008,6 +1038,36 @@ var (
|
||||
},
|
||||
[]string{"reason"},
|
||||
)
|
||||
|
||||
// ImageVolumeRequestedTotal trakcs the number of requested image volumes.
|
||||
ImageVolumeRequestedTotal = metrics.NewCounter(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: ImageVolumeRequestedTotalKey,
|
||||
Help: "Number of requested image volumes.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
|
||||
// ImageVolumeMountedSucceedTotal tracks the number of successful image volume mounts.
|
||||
ImageVolumeMountedSucceedTotal = metrics.NewCounter(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: ImageVolumeMountedSucceedTotalKey,
|
||||
Help: "Number of successful image volume mounts.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
|
||||
// ImageVolumeMountedErrorsTotal tracks the number of failed image volume mounts.
|
||||
ImageVolumeMountedErrorsTotal = metrics.NewCounter(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: KubeletSubsystem,
|
||||
Name: ImageVolumeMountedErrorsTotalKey,
|
||||
Help: "Number of failed image volume mounts.",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
@ -1078,7 +1138,9 @@ func Register(collectors ...metrics.StableCollector) {
|
||||
legacyregistry.MustRegister(CPUManagerPinningErrorsTotal)
|
||||
legacyregistry.MustRegister(CPUManagerSharedPoolSizeMilliCores)
|
||||
legacyregistry.MustRegister(CPUManagerExclusiveCPUsAllocationCount)
|
||||
legacyregistry.MustRegister(CPUManagerAllocationPerNUMA)
|
||||
legacyregistry.MustRegister(ContainerAlignedComputeResources)
|
||||
legacyregistry.MustRegister(ContainerAlignedComputeResourcesFailure)
|
||||
legacyregistry.MustRegister(MemoryManagerPinningRequestTotal)
|
||||
legacyregistry.MustRegister(MemoryManagerPinningErrorsTotal)
|
||||
legacyregistry.MustRegister(TopologyManagerAdmissionRequestsTotal)
|
||||
@ -1107,6 +1169,12 @@ func Register(collectors ...metrics.StableCollector) {
|
||||
}
|
||||
|
||||
legacyregistry.MustRegister(AdmissionRejectionsTotal)
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) {
|
||||
legacyregistry.MustRegister(ImageVolumeRequestedTotal)
|
||||
legacyregistry.MustRegister(ImageVolumeMountedSucceedTotal)
|
||||
legacyregistry.MustRegister(ImageVolumeMountedErrorsTotal)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -89,6 +89,7 @@ type PluginInfo struct {
|
||||
UUID types.UID
|
||||
Handler PluginHandler
|
||||
Name string
|
||||
Endpoint string
|
||||
}
|
||||
|
||||
func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user