rebase: update kubernetes to 1.26.1

Update Kubernetes and its dependencies to v1.26.1.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Authored by Madhu Rajanna on 2023-02-01 18:06:36 +01:00; committed by mergify[bot]
parent e9e33fb851
commit 9c8de9471e
937 changed files with 75539 additions and 33050 deletions


@ -143,6 +143,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct {
WhenScaled PersistentVolumeClaimRetentionPolicyType
}
// StatefulSetOrdinals describes the policy used for replica ordinal assignment
// in this StatefulSet.
type StatefulSetOrdinals struct {
// start is the number representing the first replica's index. It may be used
// to number replicas from an alternate index (eg: 1-indexed) over the default
// 0-indexed names, or to orchestrate progressive movement of replicas from
// one StatefulSet to another.
// If set, replica indices will be in the range:
// [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).
// If unset, defaults to 0. Replica indices will be in the range:
// [0, .spec.replicas).
// +optional
Start int32
}
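// Illustrative sketch (not part of this commit): the ordinal range described
// above. With Start = 5 and three replicas, the indices fall in [5, 8), so a
// StatefulSet named "web" would own pods "web-5", "web-6" and "web-7".
// The helper name is an assumption added for illustration only.
func exampleReplicaIndices(ordinals *StatefulSetOrdinals, replicas int32) []int32 {
	start := int32(0)
	if ordinals != nil {
		start = ordinals.Start
	}
	indices := make([]int32, 0, replicas)
	for i := start; i < start+replicas; i++ {
		indices = append(indices, i)
	}
	return indices
}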
// A StatefulSetSpec is the specification of a StatefulSet.
type StatefulSetSpec struct {
// Replicas is the desired number of replicas of the given Template.
@ -162,7 +177,9 @@ type StatefulSetSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
// will fulfill this Template, but have a unique identity from the rest
// of the StatefulSet.
// of the StatefulSet. Each pod will be named with the format
// <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
// "web" with index number "3" would be named "web-3".
Template api.PodTemplateSpec
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
@ -215,6 +232,14 @@ type StatefulSetSpec struct {
// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy
// ordinals controls the numbering of replica indices in a StatefulSet. The
// default ordinals behavior assigns a "0" index to the first replica and
// increments the index by one for each additional replica requested. Using
// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
// enabled, which is alpha.
// +optional
Ordinals *StatefulSetOrdinals
}
// StatefulSetStatus represents the current state of a StatefulSet.
@ -342,9 +367,7 @@ type Deployment struct {
// DeploymentSpec specifies the state of a Deployment.
type DeploymentSpec struct {
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
// Number of desired pods.
Replicas int32
// Label selector for pods. Existing ReplicaSets whose pods are


@ -718,6 +718,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals.
func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals {
if in == nil {
return nil
}
out := new(StatefulSetOrdinals)
in.DeepCopyInto(out)
return out
}
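// Illustrative usage (not part of this commit): the generated helpers return an
// independent copy, so mutating the copy leaves the original untouched.
//
//	in := &StatefulSetOrdinals{Start: 10}
//	out := in.DeepCopy()
//	out.Start = 20 // in.Start is still 10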
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
*out = *in
@ -761,6 +777,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
**out = **in
}
if in.Ordinals != nil {
in, out := &in.Ordinals, &out.Ordinals
*out = new(StatefulSetOrdinals)
**out = **in
}
return
}


@ -24,10 +24,13 @@ import (
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
// The apiserver and job controller use this string as a Job annotation, to
// mark Jobs that are being tracked using pod finalizers. Two releases after
// the JobTrackingWithFinalizers graduates to GA, JobTrackingFinalizer will
// no longer be used as a Job annotation.
//
// Additionally, the apiserver and job controller use this string as a Job
// annotation, to mark Jobs that are being tracked using pod finalizers.
// However, this behavior is deprecated in kubernetes 1.26. This means that, in
// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
// apiserver and job controller will ignore this annotation and they will
// always track jobs using finalizers.
const JobTrackingFinalizer = "batch.kubernetes.io/job-tracking"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -405,9 +408,6 @@ type JobStatus struct {
// (3) Remove the pod UID from the array while increasing the corresponding
// counter.
//
// This field is beta-level. The job controller only makes use of this field
// when the feature gate JobTrackingWithFinalizers is enabled (enabled
// by default).
// Old jobs might not be tracked using this field, in which case the field
// remains null.
// +optional
@ -440,8 +440,7 @@ const (
// JobFailed means the job has failed its execution.
JobFailed JobConditionType = "Failed"
// FailureTarget means the job is about to fail its execution.
// The constant is to be renamed once the name is accepted within the KEP-3329.
AlphaNoCompatGuaranteeJobFailureTarget JobConditionType = "FailureTarget"
JobFailureTarget JobConditionType = "FailureTarget"
)
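// Illustrative sketch (not part of this commit): checking a Job's status for the
// renamed FailureTarget condition. The helper name is an assumption added for
// illustration only.
func exampleHasJobCondition(conditions []JobCondition, t JobConditionType) bool {
	for _, c := range conditions {
		if c.Type == t {
			return true
		}
	}
	return false
}

// e.g. exampleHasJobCondition(job.Status.Conditions, JobFailureTarget)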
// JobCondition describes current state of a job.


@ -175,7 +175,7 @@ func IsExtendedResourceName(name core.ResourceName) bool {
}
// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
return false
}
return true


@ -291,7 +291,7 @@ type PersistentVolume struct {
// +optional
metav1.ObjectMeta
//Spec defines a persistent volume owned by the cluster
// Spec defines a persistent volume owned by the cluster
// +optional
Spec PersistentVolumeSpec
@ -452,29 +452,54 @@ type PersistentVolumeClaimSpec struct {
// * An existing PVC (PersistentVolumeClaim)
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
// If the AnyVolumeDataSource feature gate is enabled, this field will always have
// the same contents as the DataSourceRef field.
// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
// If the namespace is specified, then dataSourceRef will not be copied to dataSource.
// +optional
DataSource *TypedLocalObjectReference
// Specifies the object from which to populate the volume with data, if a non-empty
// volume is desired. This may be any local object from a non-empty API group (non
// volume is desired. This may be any object from a non-empty API group (non
// core object) or a PersistentVolumeClaim object.
// When this field is specified, volume binding will only succeed if the type of
// the specified object matches some installed volume populator or dynamic
// provisioner.
// This field will replace the functionality of the DataSource field and as such
// This field will replace the functionality of the dataSource field and as such
// if both fields are non-empty, they must have the same value. For backwards
// compatibility, both fields (DataSource and DataSourceRef) will be set to the same
// compatibility, when namespace isn't specified in dataSourceRef,
// both fields (dataSource and dataSourceRef) will be set to the same
// value automatically if one of them is empty and the other is non-empty.
// There are two important differences between DataSource and DataSourceRef:
// * While DataSource only allows two specific types of objects, DataSourceRef
// When namespace is specified in dataSourceRef,
// dataSource isn't set to the same value and must be empty.
// There are three important differences between dataSource and dataSourceRef:
// * While dataSource only allows two specific types of objects, dataSourceRef
// allows any non-core object, as well as PersistentVolumeClaim objects.
// * While DataSource ignores disallowed values (dropping them), DataSourceRef
// * While dataSource ignores disallowed values (dropping them), dataSourceRef
// preserves all values, and generates an error if a disallowed value is
// specified.
// * While dataSource only allows local objects, dataSourceRef allows objects
// in any namespaces.
// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
// +optional
DataSourceRef *TypedLocalObjectReference
DataSourceRef *TypedObjectReference
}
type TypedObjectReference struct {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
// For any other third-party types, APIGroup is required.
// +optional
APIGroup *string
// Kind is the type of resource being referenced
Kind string
// Name is the name of resource being referenced
Name string
// Namespace is the namespace of resource being referenced
// Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
// (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
// +featureGate=CrossNamespaceVolumeDataSource
// +optional
Namespace *string
}
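// Illustrative sketch (not part of this commit): a dataSourceRef pointing at a
// VolumeSnapshot in another namespace. This requires the
// CrossNamespaceVolumeDataSource feature gate and a ReferenceGrant in the
// referent namespace; the object names and the helper itself are assumptions
// added for illustration only.
func examplePVCSpecWithCrossNamespaceDataSourceRef() PersistentVolumeClaimSpec {
	snapshotGroup := "snapshot.storage.k8s.io"
	otherNS := "backup-ns"
	return PersistentVolumeClaimSpec{
		// dataSource stays empty here: it may not be set when
		// dataSourceRef.namespace is specified (see the field comments above).
		DataSourceRef: &TypedObjectReference{
			APIGroup:  &snapshotGroup,
			Kind:      "VolumeSnapshot",
			Name:      "nightly-snap",
			Namespace: &otherNS,
		},
	}
}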
// PersistentVolumeClaimConditionType defines the condition of PV claim.
@ -1977,10 +2002,10 @@ type EnvFromSource struct {
// +optional
Prefix string
// The ConfigMap to select from.
//+optional
// +optional
ConfigMapRef *ConfigMapEnvSource
// The Secret to select from.
//+optional
// +optional
SecretRef *SecretEnvSource
}
@ -2160,6 +2185,25 @@ type ResourceRequirements struct {
// otherwise to an implementation-defined value
// +optional
Requests ResourceList
// Claims lists the names of resources, defined in spec.resourceClaims,
// that are used by this container.
//
// This is an alpha field and requires enabling the
// DynamicResourceAllocation feature gate.
//
// This field is immutable.
//
// +featureGate=DynamicResourceAllocation
// +optional
Claims []ResourceClaim
}
// ResourceClaim references one entry in PodSpec.ResourceClaims.
type ResourceClaim struct {
// Name must match the name of one entry in pod.spec.resourceClaims of
// the Pod where this field is used. It makes that resource available
// inside a container.
Name string
}
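// Illustrative sketch (not part of this commit): a container asking for access
// to a pod-level resource claim by name. The "gpu" entry must match one item in
// pod.spec.resourceClaims (see PodResourceClaim below); the helper and literal
// values are assumptions added for illustration only.
func exampleResourcesWithClaim() ResourceRequirements {
	return ResourceRequirements{
		Claims: []ResourceClaim{
			{Name: "gpu"},
		},
	}
}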
// Container represents a single container that is expected to be run on the host.
@ -2428,12 +2472,14 @@ const (
// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
// can't schedule the pod right now, for example due to insufficient resources in the cluster.
PodReasonUnschedulable = "Unschedulable"
// PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler
// skips scheduling the pod because one or more scheduling gates are still present.
PodReasonSchedulingGated = "SchedulingGated"
// ContainersReady indicates whether all containers in the pod are ready.
ContainersReady PodConditionType = "ContainersReady"
// AlphaNoCompatGuaranteeDisruptionTarget indicates the pod is about to be deleted due to a
// DisruptionTarget indicates the pod is about to be terminated due to a
// disruption (such as preemption, eviction API or garbage-collection).
// The constant is to be renamed once the name is accepted within the KEP-3329.
AlphaNoCompatGuaranteeDisruptionTarget PodConditionType = "DisruptionTarget"
DisruptionTarget PodConditionType = "DisruptionTarget"
)
// PodCondition represents pod's condition
@ -2502,7 +2548,7 @@ const (
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
//Required. A list of node selector terms. The terms are ORed.
// Required. A list of node selector terms. The terms are ORed.
NodeSelectorTerms []NodeSelectorTerm
}
@ -2997,6 +3043,68 @@ type PodSpec struct {
// - spec.containers[*].securityContext.runAsGroup
// +optional
OS *PodOS
// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
// More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.
//
// This is an alpha-level feature enabled by PodSchedulingReadiness feature gate.
// +optional
SchedulingGates []PodSchedulingGate
// ResourceClaims defines which ResourceClaims must be allocated
// and reserved before the Pod is allowed to start. The resources
// will be made available to those containers which consume them
// by name.
//
// This is an alpha field and requires enabling the
// DynamicResourceAllocation feature gate.
//
// This field is immutable.
//
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaims []PodResourceClaim
}
// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
// Containers that need access to the ResourceClaim reference it with this name.
type PodResourceClaim struct {
// Name uniquely identifies this resource claim inside the pod.
// This must be a DNS_LABEL.
Name string
// Source describes where to find the ResourceClaim.
Source ClaimSource
}
// ClaimSource describes a reference to a ResourceClaim.
//
// Exactly one of these fields should be set. Consumers of this type must
// treat an empty object as if it has an unknown value.
type ClaimSource struct {
// ResourceClaimName is the name of a ResourceClaim object in the same
// namespace as this pod.
ResourceClaimName *string
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
// object in the same namespace as this pod.
//
// The template will be used to create a new ResourceClaim, which will
// be bound to this pod. When this pod is deleted, the ResourceClaim
// will also be deleted. The name of the ResourceClaim will be <pod
// name>-<resource name>, where <resource name> is the
// PodResourceClaim.Name. Pod validation will reject the pod if the
// concatenated name is not valid for a ResourceClaim (e.g. too long).
//
// An existing ResourceClaim with that name that is not owned by the
// pod will not be used for the pod to avoid using an unrelated
// resource by mistake. Scheduling and pod startup are then blocked
// until the unrelated ResourceClaim is removed.
//
// This field is immutable and no changes will be made to the
// corresponding ResourceClaim by the control plane after creating the
// ResourceClaim.
ResourceClaimTemplateName *string
}
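// Illustrative sketch (not part of this commit): two pod-level resource claims,
// one referencing an existing ResourceClaim and one generated from a
// ResourceClaimTemplate. Exactly one ClaimSource field is set per entry, and
// containers reference the entries by name via Resources.Claims. The names and
// the helper are assumptions added for illustration only.
func examplePodResourceClaims() []PodResourceClaim {
	existing := "shared-gpu-claim"
	template := "scratch-claim-template"
	return []PodResourceClaim{
		{Name: "gpu", Source: ClaimSource{ResourceClaimName: &existing}},
		{Name: "scratch", Source: ClaimSource{ResourceClaimTemplateName: &template}},
	}
}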
// OSName is the set of OS'es that can be used in OS.
@ -3017,6 +3125,13 @@ type PodOS struct {
Name OSName
}
// PodSchedulingGate is associated to a Pod to guard its scheduling.
type PodSchedulingGate struct {
// Name of the scheduling gate.
// Each scheduling gate must have a unique name field.
Name string
}
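// Illustrative sketch (not part of this commit): a pod created with these gates
// stays in the SchedulingGated state (see PodReasonSchedulingGated above) until
// a controller removes them. The gate names and the helper are assumptions
// added for illustration only.
func exampleSchedulingGates() []PodSchedulingGate {
	return []PodSchedulingGate{
		{Name: "example.com/quota-check"},
		{Name: "example.com/topology-ready"},
	}
}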
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
@ -3131,8 +3246,11 @@ type PodSecurityContext struct {
// +optional
RunAsNonRoot *bool
// A list of groups applied to the first process run in each container, in addition
// to the container's primary GID. If unspecified, no groups will be added to
// any container.
// to the container's primary GID, the fsGroup (if specified), and group memberships
// defined in the container image for the uid of the container process. If unspecified,
// no additional groups are added to any container. Note that group memberships
// defined in the container image for the uid of the container process are still effective,
// even if they are not included in this list.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
SupplementalGroups []int64
@ -3494,7 +3612,7 @@ type ReplicationControllerSpec struct {
// insufficient replicas are detected. This reference is ignored if a Template is set.
// Must be set before converting to a versioned API object
// +optional
//TemplateRef *ObjectReference
// TemplateRef *ObjectReference
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. Internally, this takes precedence over a
@ -3936,7 +4054,6 @@ type ServiceSpec struct {
// implementation (e.g. cloud providers) should ignore Services that set this field.
// This field can only be set when creating or updating a Service to type 'LoadBalancer'.
// Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
// +featureGate=LoadBalancerClass
// +optional
LoadBalancerClass *string
@ -3946,7 +4063,6 @@ type ServiceSpec struct {
// dropping the traffic if there are no local endpoints. The default value,
// "Cluster", uses the standard behavior of routing to all endpoints evenly
// (possibly modified by topology and other features).
// +featureGate=ServiceInternalTrafficPolicy
// +optional
InternalTrafficPolicy *ServiceInternalTrafficPolicyType
}
@ -4161,7 +4277,7 @@ type NodeSpec struct {
// +optional
Taints []Taint
// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.
// +optional
ConfigSource *NodeConfigSource
@ -4831,7 +4947,7 @@ type ObjectReference struct {
// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
type LocalObjectReference struct {
//TODO: Add other useful fields. apiVersion, kind, uid?
// TODO: Add other useful fields. apiVersion, kind, uid?
Name string
}
@ -5737,7 +5853,7 @@ type TopologySpreadConstraint struct {
// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
//
// If this value is nil, the behavior is equivalent to the Honor policy.
// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
NodeAffinityPolicy *NodeInclusionPolicy
// NodeTaintsPolicy indicates how we will treat node taints when calculating
@ -5747,7 +5863,7 @@ type TopologySpreadConstraint struct {
// - Ignore: node taints are ignored. All nodes are included.
//
// If this value is nil, the behavior is equivalent to the Ignore policy.
// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
// +optional
NodeTaintsPolicy *NodeInclusionPolicy
// MatchLabelKeys is a set of pod label keys to select the pods over which


@ -22,8 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
"k8s.io/utils/pointer"
)
@ -130,14 +128,11 @@ func SetDefaults_Service(obj *v1.Service) {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
}
if utilfeature.DefaultFeatureGate.Enabled(features.ServiceInternalTrafficPolicy) {
if obj.Spec.InternalTrafficPolicy == nil {
if obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer || obj.Spec.Type == v1.ServiceTypeClusterIP {
serviceInternalTrafficPolicyCluster := v1.ServiceInternalTrafficPolicyCluster
obj.Spec.InternalTrafficPolicy = &serviceInternalTrafficPolicyCluster
}
if obj.Spec.InternalTrafficPolicy == nil {
if obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer || obj.Spec.Type == v1.ServiceTypeClusterIP {
serviceInternalTrafficPolicyCluster := v1.ServiceInternalTrafficPolicyCluster
obj.Spec.InternalTrafficPolicy = &serviceInternalTrafficPolicyCluster
}
}
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {


@ -39,7 +39,7 @@ func IsExtendedResourceName(name v1.ResourceName) bool {
}
// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
return false
}
return true


@ -192,6 +192,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClaimSource)(nil), (*core.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClaimSource_To_core_ClaimSource(a.(*v1.ClaimSource), b.(*core.ClaimSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ClaimSource)(nil), (*v1.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ClaimSource_To_v1_ClaimSource(a.(*core.ClaimSource), b.(*v1.ClaimSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*v1.ClientIPConfig), b.(*core.ClientIPConfig), scope)
}); err != nil {
@ -1352,6 +1362,26 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodResourceClaim)(nil), (*core.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodResourceClaim_To_core_PodResourceClaim(a.(*v1.PodResourceClaim), b.(*core.PodResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodResourceClaim)(nil), (*v1.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodResourceClaim_To_v1_PodResourceClaim(a.(*core.PodResourceClaim), b.(*v1.PodResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*v1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodSchedulingGate)(nil), (*v1.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(a.(*core.PodSchedulingGate), b.(*v1.PodSchedulingGate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope)
}); err != nil {
@ -1562,6 +1592,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceClaim)(nil), (*core.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceClaim_To_core_ResourceClaim(a.(*v1.ResourceClaim), b.(*core.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceClaim)(nil), (*v1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceClaim_To_v1_ResourceClaim(a.(*core.ResourceClaim), b.(*v1.ResourceClaim), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*v1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope)
}); err != nil {
@ -1967,6 +2007,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.TypedObjectReference)(nil), (*core.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_TypedObjectReference_To_core_TypedObjectReference(a.(*v1.TypedObjectReference), b.(*core.TypedObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.TypedObjectReference)(nil), (*v1.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_TypedObjectReference_To_v1_TypedObjectReference(a.(*core.TypedObjectReference), b.(*v1.TypedObjectReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*v1.VolumeDevice), b.(*core.VolumeDevice), scope)
}); err != nil {
@ -2613,6 +2663,28 @@ func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVol
return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
}
func autoConvert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_v1_ClaimSource_To_core_ClaimSource is an autogenerated conversion function.
func Convert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
return autoConvert_v1_ClaimSource_To_core_ClaimSource(in, out, s)
}
func autoConvert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_core_ClaimSource_To_v1_ClaimSource is an autogenerated conversion function.
func Convert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
return autoConvert_core_ClaimSource_To_v1_ClaimSource(in, out, s)
}
func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
return nil
@ -5172,7 +5244,7 @@ func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(
out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.DataSource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
out.DataSourceRef = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSourceRef))
out.DataSourceRef = (*core.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
return nil
}
@ -5191,7 +5263,7 @@ func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(
out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
out.DataSource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
out.DataSourceRef = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSourceRef))
out.DataSourceRef = (*v1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
return nil
}
@ -6069,6 +6141,52 @@ func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessG
return autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in, out, s)
}
func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_ClaimSource_To_core_ClaimSource(&in.Source, &out.Source, s); err != nil {
return err
}
return nil
}
// Convert_v1_PodResourceClaim_To_core_PodResourceClaim is an autogenerated conversion function.
func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
return autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in, out, s)
}
func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_core_ClaimSource_To_v1_ClaimSource(&in.Source, &out.Source, s); err != nil {
return err
}
return nil
}
// Convert_core_PodResourceClaim_To_v1_PodResourceClaim is an autogenerated conversion function.
func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
return autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in, out, s)
}
func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate is an autogenerated conversion function.
func Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
return autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in, out, s)
}
func autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *v1.PodSchedulingGate, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate is an autogenerated conversion function.
func Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *v1.PodSchedulingGate, s conversion.Scope) error {
return autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in, out, s)
}
func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error {
out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
out.WindowsOptions = (*core.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions))
@ -6188,6 +6306,8 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
out.OS = (*core.PodOS)(unsafe.Pointer(in.OS))
// INFO: in.HostUsers opted out of conversion generation
out.SchedulingGates = *(*[]core.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]core.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
return nil
}
@ -6241,6 +6361,8 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s
out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
out.TopologySpreadConstraints = *(*[]v1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
out.OS = (*v1.PodOS)(unsafe.Pointer(in.OS))
out.SchedulingGates = *(*[]v1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
out.ResourceClaims = *(*[]v1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
return nil
}
@ -6905,6 +7027,26 @@ func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(
return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s)
}
func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_v1_ResourceClaim_To_core_ResourceClaim is an autogenerated conversion function.
func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
return autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in, out, s)
}
func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
return nil
}
// Convert_core_ResourceClaim_To_v1_ResourceClaim is an autogenerated conversion function.
func Convert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
return autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in, out, s)
}
func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error {
out.ContainerName = in.ContainerName
out.Resource = in.Resource
@ -7032,6 +7174,7 @@ func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.Resourc
func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error {
out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests))
out.Claims = *(*[]core.ResourceClaim)(unsafe.Pointer(&in.Claims))
return nil
}
@ -7043,6 +7186,7 @@ func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.Resourc
func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits))
out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests))
out.Claims = *(*[]v1.ResourceClaim)(unsafe.Pointer(&in.Claims))
return nil
}
@ -8051,6 +8195,32 @@ func Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *
return autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in, out, s)
}
func autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_v1_TypedObjectReference_To_core_TypedObjectReference is an autogenerated conversion function.
func Convert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error {
return autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in, out, s)
}
func autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *v1.TypedObjectReference, s conversion.Scope) error {
out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
return nil
}
// Convert_core_TypedObjectReference_To_v1_TypedObjectReference is an autogenerated conversion function.
func Convert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *v1.TypedObjectReference, s conversion.Scope) error {
return autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in, out, s)
}
func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {


@ -20,4 +20,3 @@ reviewers:
- soltysh
- jsafrane
- dims
- fejta


@ -1,61 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
// ValidateConditionalService validates conditionally valid fields.
func ValidateConditionalService(service, oldService *api.Service) field.ErrorList {
var errs field.ErrorList
errs = append(errs, validateMixedProtocolLBService(service, oldService)...)
return errs
}
// validateMixedProtocolLBService checks if the old Service has type=LoadBalancer and whether the Service has different Protocols
// on its ports. If the MixedProtocolLBService feature flag is disabled the usage of different Protocols in the new Service is
// valid only if the old Service has different Protocols, too.
func validateMixedProtocolLBService(service, oldService *api.Service) (errs field.ErrorList) {
if service.Spec.Type != api.ServiceTypeLoadBalancer {
return
}
if utilfeature.DefaultFeatureGate.Enabled(features.MixedProtocolLBService) {
return
}
if serviceHasMixedProtocols(service) && !serviceHasMixedProtocols(oldService) {
errs = append(errs, field.Invalid(field.NewPath("spec", "ports"), service.Spec.Ports, "may not contain more than 1 protocol when type is 'LoadBalancer'"))
}
return
}
func serviceHasMixedProtocols(service *api.Service) bool {
if service == nil {
return false
}
protos := map[string]bool{}
for _, port := range service.Spec.Ports {
protos[string(port.Protocol)] = true
}
return len(protos) > 1
}


@ -309,7 +309,7 @@ func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList
// validateOverhead can be used to check whether the given Overhead is valid.
func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// reuse the ResourceRequirements validation logic
return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, fldPath, opts)
return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts)
}
// Validates that given value is not negative.
@ -1013,7 +1013,7 @@ func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistent
func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 {
//TODO: consider adding a RequiredOneOf() error for this and similar cases
// TODO: consider adding a RequiredOneOf() error for this and similar cases
allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required"))
}
if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 {
@ -1621,12 +1621,12 @@ func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPat
// ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
// Checking of the object data is very minimal because only labels and annotations are used.
func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := validatePersistentVolumeClaimTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
allErrs := ValidateTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"), opts)...)
return allErrs
}
func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
func ValidateTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
// All other fields are not supported and thus must not be set
@ -1634,11 +1634,11 @@ func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta,
// but then adding a new one to ObjectMeta wouldn't be checked
// unless this code gets updated. Instead, we ensure that
// only allowed fields are set via reflection.
allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedPVCTemplateObjectMetaFields, "cannot be set for an ephemeral volume", fldPath)...)
allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedTemplateObjectMetaFields, "cannot be set", fldPath)...)
return allErrs
}
var allowedPVCTemplateObjectMetaFields = map[string]bool{
var allowedTemplateObjectMetaFields = map[string]bool{
"Annotations": true,
"Labels": true,
}
@ -2029,6 +2029,8 @@ type PersistentVolumeClaimSpecValidationOptions struct {
EnableRecoverFromExpansionFailure bool
// Allow assigning StorageClass to unbound PVCs retroactively
EnableRetroactiveDefaultStorageClass bool
// Allow to validate the label value of the label selector
AllowInvalidLabelValueInSelector bool
}
func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions {
@ -2036,11 +2038,19 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure),
EnableRetroactiveDefaultStorageClass: utilfeature.DefaultFeatureGate.Enabled(features.RetroactiveDefaultStorageClass),
AllowInvalidLabelValueInSelector: false,
}
if oldPvc == nil {
// If there's no old PVC, use the options based solely on feature enablement
return opts
}
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
if len(unversionedvalidation.ValidateLabelSelector(oldPvc.Spec.Selector, labelSelectorValidationOpts, nil)) > 0 {
// If the old object had an invalid label selector, continue to allow it in the new object
opts.AllowInvalidLabelValueInSelector = true
}
if helper.ContainsAccessMode(oldPvc.Spec.AccessModes, core.ReadWriteOncePod) {
// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
@ -2051,12 +2061,20 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum
func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTemplate *core.PersistentVolumeClaimTemplate) PersistentVolumeClaimSpecValidationOptions {
opts := PersistentVolumeClaimSpecValidationOptions{
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
AllowInvalidLabelValueInSelector: false,
}
if oldClaimTemplate == nil {
// If there's no old PVC template, use the options based solely on feature enablement
return opts
}
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
if len(unversionedvalidation.ValidateLabelSelector(oldClaimTemplate.Spec.Selector, labelSelectorValidationOpts, nil)) > 0 {
// If the old object had an invalid label selector, continue to allow it in the new object
opts.AllowInvalidLabelValueInSelector = true
}
if helper.ContainsAccessMode(oldClaimTemplate.Spec.AccessModes, core.ReadWriteOncePod) {
// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
opts.AllowReadWriteOncePod = true
@ -2086,7 +2104,34 @@ func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *fie
apiGroup = *dataSource.APIGroup
}
if len(apiGroup) == 0 && dataSource.Kind != "PersistentVolumeClaim" {
allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, ""))
allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup"))
}
return allErrs
}
// validateDataSourceRef validates a DataSourceRef in a PersistentVolumeClaimSpec
func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(dataSourceRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
}
if len(dataSourceRef.Kind) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
}
apiGroup := ""
if dataSourceRef.APIGroup != nil {
apiGroup = *dataSourceRef.APIGroup
}
if len(apiGroup) == 0 && dataSourceRef.Kind != "PersistentVolumeClaim" {
allErrs = append(allErrs, field.Invalid(fldPath, dataSourceRef.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup"))
}
if dataSourceRef.Namespace != nil && len(*dataSourceRef.Namespace) > 0 {
for _, msg := range ValidateNameFunc(ValidateNamespaceName)(*dataSourceRef.Namespace, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), *dataSourceRef.Namespace, msg))
}
}
return allErrs
@ -2099,7 +2144,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required"))
}
if spec.Selector != nil {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...)
}
expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes)
@ -2145,10 +2193,15 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"))...)
}
if spec.DataSourceRef != nil {
allErrs = append(allErrs, validateDataSource(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...)
allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...)
}
if spec.DataSource != nil && spec.DataSourceRef != nil {
if !apiequality.Semantic.DeepEqual(spec.DataSource, spec.DataSourceRef) {
if spec.DataSourceRef != nil && spec.DataSourceRef.Namespace != nil && len(*spec.DataSourceRef.Namespace) > 0 {
if spec.DataSource != nil {
allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
"may not be specified when dataSourceRef.namespace is specified"))
}
} else if spec.DataSource != nil && spec.DataSourceRef != nil {
if !isDataSourceEqualDataSourceRef(spec.DataSource, spec.DataSourceRef) {
allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
"must match dataSourceRef"))
}
@ -2157,6 +2210,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
return allErrs
}
func isDataSourceEqualDataSourceRef(dataSource *core.TypedLocalObjectReference, dataSourceRef *core.TypedObjectReference) bool {
return reflect.DeepEqual(dataSource.APIGroup, dataSourceRef.APIGroup) && dataSource.Kind == dataSourceRef.Kind && dataSource.Name == dataSourceRef.Name
}
// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim
func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
@ -2711,6 +2768,54 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str
return allErrs
}
func validatePodResourceClaims(claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
podClaimNames := sets.NewString()
for i, claim := range claims {
allErrs = append(allErrs, validatePodResourceClaim(claim, &podClaimNames, fldPath.Index(i))...)
}
return allErrs
}
// gatherPodResourceClaimNames returns a set of all non-empty
// PodResourceClaim.Name values. Validation that those names are valid is
// handled by validatePodResourceClaims.
func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String {
podClaimNames := sets.String{}
for _, claim := range claims {
if claim.Name != "" {
podClaimNames.Insert(claim.Name)
}
}
return podClaimNames
}
func validatePodResourceClaim(claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claim.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
podClaimNames.Insert(claim.Name)
}
allErrs = append(allErrs, validatePodResourceClaimSource(claim.Source, fldPath.Child("source"))...)
return allErrs
}
func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claimSource.ResourceClaimName != nil && claimSource.ResourceClaimTemplateName != nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "at most one of `resourceClaimName` or `resourceClaimTemplateName` may be specified"))
}
if claimSource.ResourceClaimName == nil && claimSource.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
return allErrs
}
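// Illustrative sketch (not part of this commit): how the ClaimSource rules above
// behave. The helper and the literal values are assumptions added for
// illustration only.
func exampleValidateClaimSource() field.ErrorList {
	name := "shared-gpu-claim"
	valid := core.ClaimSource{ResourceClaimName: &name}
	_ = validatePodResourceClaimSource(valid, field.NewPath("source")) // no errors: exactly one field set
	invalid := core.ClaimSource{} // neither field set: yields a "must specify one of" error
	return validatePodResourceClaimSource(invalid, field.NewPath("source"))
}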
func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -2933,8 +3038,8 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error
// validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers.
// Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates.
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrs field.ErrorList
if len(ephemeralContainers) == 0 {
return allErrs
@ -2954,7 +3059,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
idxPath := fldPath.Index(i)
c := (*core.Container)(&ec.EphemeralContainerCommon)
allErrs = append(allErrs, validateContainerCommon(c, volumes, idxPath, opts)...)
allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts)...)
// Ephemeral containers don't need looser constraints for pod templates, so it's convenient to apply both validations
// here where we've already converted EphemeralContainerCommon to Container.
allErrs = append(allErrs, validateContainerOnlyForPod(c, idxPath)...)
@ -2992,7 +3097,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
return allErrs
}
// validateFieldAcceptList checks that only allowed fields are set.
// ValidateFieldAcceptList checks that only allowed fields are set.
// The value must be a struct (not a pointer to a struct!).
func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
@ -3016,7 +3121,7 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er
}
// validateInitContainers is called by pod spec and template validation to validate the list of init containers
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrs field.ErrorList
allNames := sets.String{}
@ -3027,7 +3132,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
idxPath := fldPath.Index(i)
// Apply the validation common to all container types
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, idxPath, opts)...)
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts)...)
// Names must be unique within regular and init containers. Collisions with ephemeral containers
// will be detected by validateEphemeralContainers().
@ -3060,8 +3165,8 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
// validateContainerCommon applies validation common to all container types. It's called by regular, init, and ephemeral
// container list validation to require a properly formatted name, image, etc.
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, path *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, path *field.Path, opts PodValidationOptions) field.ErrorList {
var allErrs field.ErrorList
namePath := path.Child("name")
if len(ctr.Name) == 0 {
@ -3097,7 +3202,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"))...)
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, path.Child("resources"), opts)...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...)
return allErrs
}
@ -3150,7 +3255,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList
}
// validateContainers is called by pod spec and template validation to validate the list of regular containers.
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if len(containers) == 0 {
@ -3162,7 +3267,7 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol
path := fldPath.Index(i)
// Apply validation common to all containers
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, path, opts)...)
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts)...)
// Container names must be unique within the list of regular containers.
// Collisions with init or ephemeral container names will be detected by the init or ephemeral
@ -3272,6 +3377,22 @@ func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *fie
return allErrs
}
func validateSchedulingGates(schedulingGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// There should be no duplicates in the list of scheduling gates.
seen := sets.String{}
for i, schedulingGate := range schedulingGates {
if schedulingGate.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), "must not be empty"))
}
if seen.Has(schedulingGate.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), schedulingGate.Name))
}
seen.Insert(schedulingGate.Name)
}
return allErrs
}
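
As a rough illustration of the rule enforced here (gate names must be non-empty and unique), a standalone sketch using simplified stand-in types rather than the upstream API:

package main

import "fmt"

// gate is a simplified stand-in for core.PodSchedulingGate.
type gate struct{ Name string }

// checkGates mirrors validateSchedulingGates: every gate needs a name and
// names must be unique within the list.
func checkGates(gates []gate) []string {
	var errs []string
	seen := map[string]bool{}
	for i, g := range gates {
		if g.Name == "" {
			errs = append(errs, fmt.Sprintf("schedulingGates[%d]: must not be empty", i))
		}
		if seen[g.Name] {
			errs = append(errs, fmt.Sprintf("schedulingGates[%d]: duplicate %q", i, g.Name))
		}
		seen[g.Name] = true
	}
	return errs
}

func main() {
	fmt.Println(checkGates([]gate{{Name: "example.com/hold"}, {Name: "example.com/hold"}, {Name: ""}}))
}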
func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
@ -3355,7 +3476,7 @@ func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldP
}
// validateAffinity checks if given affinities are valid
func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList {
func validateAffinity(affinity *core.Affinity, opts PodValidationOptions, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if affinity != nil {
@ -3363,10 +3484,10 @@ func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorL
allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...)
}
if affinity.PodAffinity != nil {
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, opts.AllowInvalidLabelValueInSelector, fldPath.Child("podAffinity"))...)
}
if affinity.PodAntiAffinity != nil {
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...)
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, opts.AllowInvalidLabelValueInSelector, fldPath.Child("podAntiAffinity"))...)
}
}
@ -3420,6 +3541,28 @@ func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldToleratio
return allErrs
}
func validateOnlyDeletedSchedulingGates(newGates, oldGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(newGates) == 0 {
return allErrs
}
additionalGates := make(map[string]int)
for i, newGate := range newGates {
additionalGates[newGate.Name] = i
}
for _, oldGate := range oldGates {
delete(additionalGates, oldGate.Name)
}
for gate, i := range additionalGates {
allErrs = append(allErrs, field.Forbidden(fldPath.Index(i).Child("name"), fmt.Sprintf("only deletion is allowed, but found new scheduling gate '%s'", gate)))
}
return allErrs
}
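
The update rule above only permits removals. A hedged standalone sketch of the same check with plain Go types (not the upstream signature):

package main

import "fmt"

type gate struct{ Name string }

// onlyDeletions reports gates present in the updated list but not in the old
// one, mirroring validateOnlyDeletedSchedulingGates: removals are fine,
// additions are forbidden.
func onlyDeletions(updated, old []gate) []string {
	added := map[string]int{}
	for i, g := range updated {
		added[g.Name] = i
	}
	for _, g := range old {
		delete(added, g.Name)
	}
	var errs []string
	for name, i := range added {
		errs = append(errs, fmt.Sprintf("schedulingGates[%d].name: only deletion is allowed, but found new scheduling gate '%s'", i, name))
	}
	return errs
}

func main() {
	old := []gate{{Name: "a"}, {Name: "b"}}
	updated := []gate{{Name: "a"}, {Name: "c"}} // "b" removed (allowed), "c" added (rejected)
	fmt.Println(onlyDeletions(updated, old))
}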
func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, hostAlias := range hostAliases {
@ -3505,10 +3648,10 @@ type PodValidationOptions struct {
AllowDownwardAPIHugePages bool
// Allow invalid pod-deletion-cost annotation value for backward compatibility.
AllowInvalidPodDeletionCost bool
// Allow invalid label-value in LabelSelector
AllowInvalidLabelValueInSelector bool
// Allow pod spec to use non-integer multiple of huge page unit size
AllowIndivisibleHugePagesValues bool
// Allow hostProcess field to be set in windows security context
AllowWindowsHostProcessField bool
// Allow more DNSSearchPaths and longer DNSSearchListChars
AllowExpandedDNSConfig bool
}
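
For orientation, a trimmed stand-in for this options struct showing the usual ratcheting pattern by which such flags tend to be set; the helper below is hypothetical, not the upstream derivation logic:

package main

import "fmt"

// podValidationOptions keeps only a few of the fields listed above.
type podValidationOptions struct {
	AllowInvalidPodDeletionCost      bool
	AllowInvalidLabelValueInSelector bool
	AllowIndivisibleHugePagesValues  bool
}

// optionsForUpdate sketches ratcheting: a relaxation is granted only when the
// stored (old) object already relies on it, so existing objects keep
// validating while new ones are held to the stricter rule.
func optionsForUpdate(oldHasInvalidLabelValues bool) podValidationOptions {
	return podValidationOptions{AllowInvalidLabelValueInSelector: oldHasInvalidLabelValues}
}

func main() {
	fmt.Printf("%+v\n", optionsForUpdate(true))
	fmt.Printf("%+v\n", optionsForUpdate(false))
}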
@ -3579,7 +3722,7 @@ func validatePodIPs(pod *core.Pod) field.ErrorList {
}
// There should be no duplicates in list of Pod.PodIPs
seen := sets.String{} //:= make(map[string]int)
seen := sets.String{} // := make(map[string]int)
for i, podIP := range pod.Status.PodIPs {
if seen.Has(podIP.IP) {
allErrs = append(allErrs, field.Duplicate(podIPsField.Index(i), podIP))
@ -3602,19 +3745,22 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
allErrs = append(allErrs, vErrs...)
allErrs = append(allErrs, validateContainers(spec.Containers, vols, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, fldPath.Child("ephemeralContainers"), opts)...)
podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims)
allErrs = append(allErrs, validatePodResourceClaims(spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...)
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...)
allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...)
allErrs = append(allErrs, validateAffinity(spec.Affinity, opts, fldPath.Child("affinity"))...)
allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...)
allErrs = append(allErrs, validateSchedulingGates(spec.SchedulingGates, fldPath.Child("schedulingGates"))...)
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"))...)
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath, opts)...)
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath)...)
allErrs = append(allErrs, validateHostUsers(spec, fldPath)...)
if len(spec.ServiceAccountName) > 0 {
for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
@ -3973,12 +4119,10 @@ func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldP
}
// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("labelSelector"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, fldPath.Child("namespaceSelector"))...)
allErrs = append(allErrs, ValidatePodAffinityTermSelector(podAffinityTerm, allowInvalidLabelValueInSelector, fldPath)...)
for _, name := range podAffinityTerm.Namespaces {
for _, msg := range ValidateNamespaceName(name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
@ -3991,40 +4135,40 @@ func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *fiel
}
// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i, podAffinityTerm := range podAffinityTerms {
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...)
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowInvalidLabelValueInSelector, fldPath.Index(i))...)
}
return allErrs
}
// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList {
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for j, weightedTerm := range weightedPodAffinityTerms {
if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
}
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...)
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowInvalidLabelValueInSelector, fldPath.Index(j).Child("podAffinityTerm"))...)
}
return allErrs
}
// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
//}
// }
if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
@ -4047,19 +4191,19 @@ func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.Erro
}
// validatePodAffinity tests that the specified podAffinity fields have valid data
func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList {
func validatePodAffinity(podAffinity *core.PodAffinity, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// TODO: Uncomment the code below once RequiredDuringSchedulingRequiredDuringExecution is implemented.
// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
// allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
//}
// }
if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
}
if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
}
return allErrs
@ -4260,7 +4404,7 @@ func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *
func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) {
allErrs = field.ErrorList{}
if len(newContainers) != len(oldContainers) {
//TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
// TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers"))
return allErrs, true
}
@ -4287,6 +4431,11 @@ func ValidatePodCreate(pod *core.Pod, opts PodValidationOptions) field.ErrorList
if len(pod.Spec.EphemeralContainers) > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeralContainers"), "cannot be set on create"))
}
// A Pod cannot be assigned a Node if there are remaining scheduling gates.
if utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness) &&
pod.Spec.NodeName != "" && len(pod.Spec.SchedulingGates) != 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "cannot be set until all schedulingGates have been cleared"))
}
allErrs = append(allErrs, validateSeccompAnnotationsAndFields(pod.ObjectMeta, &pod.Spec, fldPath)...)
return allErrs
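
A minimal sketch of the create-time rule added above: node assignment and pending scheduling gates are mutually exclusive. Types are simplified stand-ins, and the feature-gate check is reduced to a boolean:

package main

import "fmt"

type podSpec struct {
	NodeName        string
	SchedulingGates []string
}

// checkCreate mirrors the new check in ValidatePodCreate: a pod may not be
// bound to a node while scheduling gates are still present.
func checkCreate(spec podSpec, podSchedulingReadinessEnabled bool) error {
	if podSchedulingReadinessEnabled && spec.NodeName != "" && len(spec.SchedulingGates) != 0 {
		return fmt.Errorf("spec.nodeName: cannot be set until all schedulingGates have been cleared")
	}
	return nil
}

func main() {
	fmt.Println(checkCreate(podSpec{NodeName: "node-1", SchedulingGates: []string{"example.com/hold"}}, true))
	fmt.Println(checkCreate(podSpec{SchedulingGates: []string{"example.com/hold"}}, true))
}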
@ -4372,6 +4521,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
// 2. spec.initContainers[*].image
// 3. spec.activeDeadlineSeconds
// 4. spec.terminationGracePeriodSeconds
// 5. spec.schedulingGates
containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers"))
allErrs = append(allErrs, containerErrs...)
@ -4407,6 +4557,9 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
// Allow only additions to tolerations updates.
allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...)
// Allow only deletions to schedulingGates updates.
allErrs = append(allErrs, validateOnlyDeletedSchedulingGates(newPod.Spec.SchedulingGates, oldPod.Spec.SchedulingGates, specPath.Child("schedulingGates"))...)
// the last thing to check is pod spec equality. If the pod specs are equal, then we can simply return the errors we have
// so far and save the cost of a deep copy.
if apiequality.Semantic.DeepEqual(newPod.Spec, oldPod.Spec) {
@ -4435,6 +4588,8 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
mungedPodSpec.ActiveDeadlineSeconds = &activeDeadlineSeconds
}
// munge spec.schedulingGates
mungedPodSpec.SchedulingGates = oldPod.Spec.SchedulingGates // +k8s:verify-mutation:reason=clone
// tolerations are checked before the deep copy, so munge those too
mungedPodSpec.Tolerations = oldPod.Spec.Tolerations // +k8s:verify-mutation:reason=clone
@ -4446,7 +4601,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) {
// This diff isn't perfect, but it's a helluva lot better than "I'm not going to tell you what the difference is".
//TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
// TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
specDiff := cmp.Diff(oldPod.Spec, mungedPodSpec)
allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds`, `spec.tolerations` (only additions to existing tolerations) or `spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)\n%v", specDiff)))
}
@ -4878,14 +5033,12 @@ func validateServiceExternalTrafficFieldsUpdate(before, after *core.Service) fie
func validateServiceInternalTrafficFieldsValue(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
if utilfeature.DefaultFeatureGate.Enabled(features.ServiceInternalTrafficPolicy) {
if service.Spec.InternalTrafficPolicy == nil {
// We do not forbid internalTrafficPolicy on other Service types because of historical reasons.
// We did not check that before it went beta and we don't want to invalidate existing stored objects.
if service.Spec.Type == core.ServiceTypeNodePort ||
service.Spec.Type == core.ServiceTypeLoadBalancer || service.Spec.Type == core.ServiceTypeClusterIP {
allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("internalTrafficPolicy"), ""))
}
if service.Spec.InternalTrafficPolicy == nil {
// We do not forbid internalTrafficPolicy on other Service types because of historical reasons.
// We did not check that before it went beta and we don't want to invalidate existing stored objects.
if service.Spec.Type == core.ServiceTypeNodePort ||
service.Spec.Type == core.ServiceTypeLoadBalancer || service.Spec.Type == core.ServiceTypeClusterIP {
allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("internalTrafficPolicy"), ""))
}
}
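
A hedged standalone sketch of the check above: when the feature gate is enabled, ClusterIP, NodePort and LoadBalancer Services must carry an internalTrafficPolicy, while other types are left alone for compatibility. Names below are stand-ins:

package main

import "fmt"

type serviceType string

const (
	clusterIP    serviceType = "ClusterIP"
	nodePort     serviceType = "NodePort"
	loadBalancer serviceType = "LoadBalancer"
	externalName serviceType = "ExternalName"
)

// requiresInternalTrafficPolicy reports whether a missing internalTrafficPolicy
// should be flagged for the given Service type.
func requiresInternalTrafficPolicy(t serviceType, policy *string) bool {
	if policy != nil {
		return false
	}
	return t == clusterIP || t == nodePort || t == loadBalancer
}

func main() {
	cluster := "Cluster"
	fmt.Println(requiresInternalTrafficPolicy(clusterIP, nil))         // true: spec.internalTrafficPolicy is required
	fmt.Println(requiresInternalTrafficPolicy(externalName, nil))      // false: historical exemption
	fmt.Println(requiresInternalTrafficPolicy(loadBalancer, &cluster)) // false: already set
}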
@ -5259,6 +5412,8 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
}
// validation specific to Node.Spec.ConfigSource
// The field ConfigSource is deprecated and will not be used. The validation is kept in place
// for backward compatibility
func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
count := int(0)
@ -5276,6 +5431,8 @@ func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.
}
// validation specific to Node.Spec.ConfigSource.ConfigMap
// The field ConfigSource is deprecated and will not be used. The validation is kept in place
// for backward compatibility
func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// uid and resourceVersion must not be set in spec
@ -5749,7 +5906,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel
}
// Validates resource requirement spec.
func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
@ -5812,6 +5969,42 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
allErrs = append(allErrs, field.Forbidden(fldPath, "HugePages require cpu or memory"))
}
allErrs = append(allErrs, validateResourceClaimNames(requirements.Claims, podClaimNames, fldPath.Child("claims"))...)
return allErrs
}
// validateResourceClaimNames checks that the names in
// ResourceRequirements.Claims have a corresponding entry in
// PodSpec.ResourceClaims.
func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
names := sets.String{}
for i, claim := range claims {
name := claim.Name
if name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), ""))
} else {
if names.Has(name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
} else {
names.Insert(name)
}
if !podClaimNames.Has(name) {
// field.NotFound doesn't accept an
// explanation. Adding one here is more
// user-friendly.
error := field.NotFound(fldPath.Index(i), name)
error.Detail = "must be one of the names in pod.spec.resourceClaims"
if len(podClaimNames) == 0 {
error.Detail += " which is empty"
} else {
error.Detail += ": " + strings.Join(podClaimNames.List(), ", ")
}
allErrs = append(allErrs, error)
}
}
}
return allErrs
}
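
To illustrate the cross-reference being enforced, a standalone sketch with plain Go types: every claim name used by a container must be declared in pod.spec.resourceClaims, be non-empty, and appear only once. This is not the upstream signature:

package main

import "fmt"

// checkClaimNames mirrors validateResourceClaimNames using a plain set.
func checkClaimNames(claims []string, podClaimNames map[string]bool) []string {
	var errs []string
	seen := map[string]bool{}
	for i, name := range claims {
		switch {
		case name == "":
			errs = append(errs, fmt.Sprintf("claims[%d]: required", i))
		case seen[name]:
			errs = append(errs, fmt.Sprintf("claims[%d]: duplicate %q", i, name))
		case !podClaimNames[name]:
			errs = append(errs, fmt.Sprintf("claims[%d]: %q must be one of the names in pod.spec.resourceClaims", i, name))
		}
		seen[name] = true
	}
	return errs
}

func main() {
	podClaims := map[string]bool{"gpu": true} // declared in pod.spec.resourceClaims
	fmt.Println(checkClaimNames([]string{"gpu", "fpga", "gpu"}, podClaims))
}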
@ -6119,7 +6312,7 @@ func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path)
// EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports.
if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 {
//TODO: consider adding a RequiredOneOf() error for this and similar cases
// TODO: consider adding a RequiredOneOf() error for this and similar cases
allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`"))
}
for addr := range ss.Addresses {
@ -6208,7 +6401,7 @@ func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *fi
// ValidateSecurityContext ensures the security context contains valid settings
func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
//this should only be true for testing since SecurityContext is defaulted by the core
// this should only be true for testing since SecurityContext is defaulted by the core
if sc == nil {
return allErrs
}
@ -6372,7 +6565,7 @@ func validateWindowsSecurityContextOptions(windowsOptions *core.WindowsSecurityC
return allErrs
}
func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// Keep track of container and hostProcess container counts for validation
@ -6384,13 +6577,6 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
podHostProcess = podSpec.SecurityContext.WindowsOptions.HostProcess
}
if !opts.AllowWindowsHostProcessField && podHostProcess != nil {
// Do not allow pods to persist data that sets hostProcess (true or false)
errMsg := "not allowed when feature gate 'WindowsHostProcessContainers' is not enabled"
allErrs = append(allErrs, field.Forbidden(fieldPath.Child("securityContext", "windowsOptions", "hostProcess"), errMsg))
return allErrs
}
hostNetwork := false
if podSpec.SecurityContext != nil {
hostNetwork = podSpec.SecurityContext.HostNetwork
@ -6404,12 +6590,6 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
containerHostProcess = c.SecurityContext.WindowsOptions.HostProcess
}
if !opts.AllowWindowsHostProcessField && containerHostProcess != nil {
// Do not allow pods to persist data that sets hostProcess (true or false)
errMsg := "not allowed when feature gate 'WindowsHostProcessContainers' is not enabled"
allErrs = append(allErrs, field.Forbidden(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), errMsg))
}
if podHostProcess != nil && containerHostProcess != nil && *podHostProcess != *containerHostProcess {
errMsg := fmt.Sprintf("pod hostProcess value must be identical if both are specified, was %v", *podHostProcess)
allErrs = append(allErrs, field.Invalid(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), *containerHostProcess, errMsg))
@ -6417,7 +6597,7 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
switch {
case containerHostProcess != nil && *containerHostProcess:
// Container explitly sets hostProcess=true
// Container explicitly sets hostProcess=true
hostProcessContainerCount++
case containerHostProcess == nil && podHostProcess != nil && *podHostProcess:
// Container inherits hostProcess=true from pod settings
@ -6428,13 +6608,6 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
})
if hostProcessContainerCount > 0 {
// Fail Pod validation if feature is not enabled (unless podspec already exists and contains HostProcess fields) instead of dropping fields based on PRR review.
if !opts.AllowWindowsHostProcessField {
errMsg := "pod must not contain Windows hostProcess containers when feature gate 'WindowsHostProcessContainers' is not enabled"
allErrs = append(allErrs, field.Forbidden(fieldPath, errMsg))
return allErrs
}
// At present, if a Windows Pod contains any HostProcess containers then all containers must be
// HostProcess containers (explicitly set or inherited).
if hostProcessContainerCount != containerCount {
@ -6744,7 +6917,7 @@ func ValidateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorLi
}
// IPFamilyPolicy stand-alone validation
//note: nil is ok, defaulted in alloc check registry/core/service/*
// note: nil is ok, defaulted in alloc check registry/core/service/*
if service.Spec.IPFamilyPolicy != nil {
// must have a supported value
if !supportedServiceIPFamilyPolicy.Has(string(*(service.Spec.IPFamilyPolicy))) {
@ -6992,3 +7165,11 @@ func sameLoadBalancerClass(oldService, service *core.Service) bool {
}
return *oldService.Spec.LoadBalancerClass == *service.Spec.LoadBalancerClass
}
func ValidatePodAffinityTermSelector(podAffinityTerm core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
labelSelectorValidationOptions := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: allowInvalidLabelValueInSelector}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, labelSelectorValidationOptions, fldPath.Child("labelSelector"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, labelSelectorValidationOptions, fldPath.Child("namespaceSelector"))...)
return allErrs
}
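
A small usage sketch of the options-aware selector validation this helper relies on, assuming the k8s.io/apimachinery module at a matching version (v0.26 or later); the selector contents are made up for illustration:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// "not a valid value!" is not a legal label value; the relaxed option
	// tolerates it (for already-stored objects), the strict one rejects it.
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "app",
			Operator: metav1.LabelSelectorOpIn,
			Values:   []string{"not a valid value!"},
		}},
	}
	strict := metav1validation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}
	relaxed := metav1validation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: true}
	fmt.Println(len(metav1validation.ValidateLabelSelector(sel, strict, field.NewPath("labelSelector"))))  // > 0 errors
	fmt.Println(len(metav1validation.ValidateLabelSelector(sel, relaxed, field.NewPath("labelSelector")))) // 0 errors
}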

View File

@ -419,6 +419,32 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
*out = *in
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
if in.ResourceClaimTemplateName != nil {
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
func (in *ClaimSource) DeepCopy() *ClaimSource {
if in == nil {
return nil
}
out := new(ClaimSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
*out = *in
@ -2965,7 +2991,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
}
if in.DataSourceRef != nil {
in, out := &in.DataSourceRef, &out.DataSourceRef
*out = new(TypedLocalObjectReference)
*out = new(TypedObjectReference)
(*in).DeepCopyInto(*out)
}
return
@ -3728,6 +3754,39 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim.
func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
if in == nil {
return nil
}
out := new(PodResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate.
func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate {
if in == nil {
return nil
}
out := new(PodSchedulingGate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = *in
@ -3961,6 +4020,18 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = new(PodOS)
**out = **in
}
if in.SchedulingGates != nil {
in, out := &in.SchedulingGates, &out.SchedulingGates
*out = make([]PodSchedulingGate, len(*in))
copy(*out, *in)
}
if in.ResourceClaims != nil {
in, out := &in.ResourceClaims, &out.ResourceClaims
*out = make([]PodResourceClaim, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -4539,6 +4610,22 @@ func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
func (in *ResourceClaim) DeepCopy() *ResourceClaim {
if in == nil {
return nil
}
out := new(ResourceClaim)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
*out = *in
@ -4719,6 +4806,11 @@ func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
(*out)[key] = val.DeepCopy()
}
}
if in.Claims != nil {
in, out := &in.Claims, &out.Claims
*out = make([]ResourceClaim, len(*in))
copy(*out, *in)
}
return
}
@ -5693,6 +5785,32 @@ func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) {
*out = *in
if in.APIGroup != nil {
in, out := &in.APIGroup, &out.APIGroup
*out = new(string)
**out = **in
}
if in.Namespace != nil {
in, out := &in.Namespace, &out.Namespace
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference.
func (in *TypedObjectReference) DeepCopy() *TypedObjectReference {
if in == nil {
return nil
}
out := new(TypedObjectReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in

View File

@ -158,15 +158,15 @@ type NetworkPolicyPort struct {
EndPort *int32
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed
// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
// that should not be included within this rule.
type IPBlock struct {
// CIDR is a string representing the IP Block
// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
CIDR string
// Except is a slice of CIDRs that should not be included within an IP Block
// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
// Except values will be rejected if they are outside the CIDR range
// +optional
Except []string
@ -423,7 +423,45 @@ type IngressTLS struct {
type IngressStatus struct {
// LoadBalancer contains the current status of the load-balancer.
// +optional
LoadBalancer api.LoadBalancerStatus
LoadBalancer IngressLoadBalancerStatus
}
// IngressLoadBalancerStatus represents the status of a load-balancer
type IngressLoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer.
// +optional
Ingress []IngressLoadBalancerIngress
}
// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based.
// +optional
IP string
// Hostname is set for load-balancer ingress points that are DNS based.
// +optional
Hostname string
// Ports provides information about the ports exposed by this LoadBalancer.
// +optional
Ports []IngressPortStatus
}
// IngressPortStatus represents the error condition of an ingress port
type IngressPortStatus struct {
// Port is the port number of the ingress port.
Port int32
// Protocol is the protocol of the ingress port.
Protocol api.Protocol
// Error indicates a problem on this port.
// The format of the error must comply with the following rules:
// - Kubernetes-defined error values use CamelCase names
// - Provider-specific error values must follow label-name style (e.g.
// example.com/name).
Error *string
}
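
For a feel of the new status shape, a standalone sketch that mirrors these fields with simplified local types (illustration only, not the API types themselves):

package main

import "fmt"

type ingressPortStatus struct {
	Port     int32
	Protocol string
	Error    *string
}

type ingressLoadBalancerIngress struct {
	IP       string
	Hostname string
	Ports    []ingressPortStatus
}

type ingressLoadBalancerStatus struct {
	Ingress []ingressLoadBalancerIngress
}

func main() {
	// Provider-specific error values follow label-name style.
	portErr := "example.com/ListenerNotReady"
	status := ingressLoadBalancerStatus{
		Ingress: []ingressLoadBalancerIngress{{
			Hostname: "lb.example.com",
			Ports:    []ingressPortStatus{{Port: 443, Protocol: "TCP", Error: &portErr}},
		}},
	}
	fmt.Printf("%+v\n", status)
}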
// IngressRule represents the rules mapping the paths under a specified host to
@ -628,7 +666,7 @@ type ClusterCIDRSpec struct {
// +optional
IPv4 string
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
// At least one of IPv4 and IPv6 must be specified.
// This field is immutable.
// +optional

View File

@ -374,6 +374,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) {
*out = *in
if in.Ports != nil {
in, out := &in.Ports, &out.Ports
*out = make([]IngressPortStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress.
func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress {
if in == nil {
return nil
}
out := new(IngressLoadBalancerIngress)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) {
*out = *in
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = make([]IngressLoadBalancerIngress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus.
func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus {
if in == nil {
return nil
}
out := new(IngressLoadBalancerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) {
*out = *in
if in.Error != nil {
in, out := &in.Error, &out.Error
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus.
func (in *IngressPortStatus) DeepCopy() *IngressPortStatus {
if in == nil {
return nil
}
out := new(IngressPortStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = *in

View File

@ -42,8 +42,56 @@ type PodDisruptionBudgetSpec struct {
// by specifying 0. This is a mutually exclusive setting with "minAvailable".
// +optional
MaxUnavailable *intstr.IntOrString
// UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
// should be considered for eviction. The current implementation considers a pod
// healthy if it has a status.conditions item with type="Ready", status="True".
//
// Valid policies are IfHealthyBudget and AlwaysAllow.
// If no policy is specified, the default behavior will be used,
// which corresponds to the IfHealthyBudget policy.
//
// IfHealthyBudget policy means that running pods (status.phase="Running") that
// are not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
//
// AlwaysAllow policy means that all running pods (status.phase="Running") that
// are not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB are met. This means prospective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
//
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
//
// This field is alpha-level. The eviction API uses this field when
// the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).
// +optional
UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType
}
// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods
// should be considered for eviction.
// +enum
type UnhealthyPodEvictionPolicyType string
const (
// IfHealthyBudget policy means that running pods (status.phase="Running") that
// are not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget"
// AlwaysAllow policy means that all running pods (status.phase="Running") that
// are not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB are met. This means prospective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow"
)
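
A hedged sketch of the eviction decision these two policies describe, reduced to booleans; the helper is illustrative, not the eviction API:

package main

import "fmt"

// evictionAllowed: healthy pods are always subject to the budget; for
// running-but-not-yet-healthy pods the policy decides whether the budget
// must still hold.
func evictionAllowed(policy string, healthy, budgetSatisfied bool) bool {
	if healthy {
		return budgetSatisfied
	}
	switch policy {
	case "AlwaysAllow":
		return true // unhealthy running pods count as already disrupted
	default: // "" or "IfHealthyBudget"
		return budgetSatisfied
	}
}

func main() {
	fmt.Println(evictionAllowed("IfHealthyBudget", false, false)) // false
	fmt.Println(evictionAllowed("AlwaysAllow", false, false))     // true
}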
// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {

View File

@ -239,6 +239,11 @@ func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) {
*out = new(intstr.IntOrString)
**out = **in
}
if in.UnhealthyPodEvictionPolicy != nil {
in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy
*out = new(UnhealthyPodEvictionPolicyType)
**out = **in
}
return
}