rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2

Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot]
Date: 2023-05-29 21:03:29 +00:00
Committed by: mergify[bot]
Parent: 0e79135419
Commit: 07b05616a0
1072 changed files with 208716 additions and 198880 deletions


@ -180,6 +180,7 @@ type StatefulSetSpec struct {
// of the StatefulSet. Each pod will be named with the format
// <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
// "web" with index number "3" would be named "web-3".
// The only allowed template.spec.restartPolicy value is "Always".
Template api.PodTemplateSpec
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
@ -229,7 +230,7 @@ type StatefulSetSpec struct {
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
// the StatefulSet VolumeClaimTemplates. This requires the
// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
// StatefulSetAutoDeletePVC feature gate to be enabled, which is beta and default on from 1.27.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy
@ -237,7 +238,7 @@ type StatefulSetSpec struct {
// default ordinals behavior assigns a "0" index to the first replica and
// increments the index by one for each additional replica requested. Using
// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
// enabled, which is alpha.
// enabled, which is beta.
// +optional
Ordinals *StatefulSetOrdinals
}
@ -376,6 +377,7 @@ type DeploymentSpec struct {
Selector *metav1.LabelSelector
// Template describes the pods that will be created.
// The only allowed template.spec.restartPolicy value is "Always".
Template api.PodTemplateSpec
// The deployment strategy to use to replace existing pods with new ones.
@ -666,6 +668,7 @@ type DaemonSetSpec struct {
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// The only allowed template.spec.restartPolicy value is "Always".
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
Template api.PodTemplateSpec
@ -857,6 +860,7 @@ type ReplicaSetSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// The only allowed template.spec.restartPolicy value is "Always".
// +optional
Template api.PodTemplateSpec
}

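The hunks above tighten the workload pod-template contract (template.spec.restartPolicy must be "Always") and promote the PVC-retention and start-ordinal StatefulSet features to beta. As a rough illustration only, here is a minimal sketch built against the public k8s.io/api/apps/v1 mirror of these internal types; the mirrored field and constant names are assumed, not taken from this diff.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	spec := appsv1.StatefulSetSpec{
		Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
			Spec: corev1.PodSpec{
				// Per the new comment, "Always" is the only accepted value here.
				RestartPolicy: corev1.RestartPolicyAlways,
				Containers:    []corev1.Container{{Name: "web", Image: "nginx"}},
			},
		},
		// Beta and on by default in 1.27: keep PVCs around when the StatefulSet goes away.
		PersistentVolumeClaimRetentionPolicy: &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		},
		// Beta in 1.27: for a StatefulSet named "web", pods become web-10, web-11, ...
		Ordinals: &appsv1.StatefulSetOrdinals{Start: 10},
	}
	fmt.Println(spec.Template.Spec.RestartPolicy)
}
```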

@ -31,25 +31,25 @@ type Scale struct {
// +optional
metav1.ObjectMeta
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
// status represents the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus
}
// ScaleSpec describes the attributes of a scale subresource.
type ScaleSpec struct {
// desired number of instances for the scaled object.
// replicas is the desired number of instances for the scaled object.
// +optional
Replicas int32
}
// ScaleStatus represents the current status of a scale subresource.
type ScaleStatus struct {
// actual number of observed instances of the scaled object.
// replicas is the actual number of observed instances of the scaled object.
Replicas int32
// label query over pods that should match the replicas count. This is same
@ -62,20 +62,23 @@ type ScaleStatus struct {
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReference struct {
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
// kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
Kind string
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
// name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
Name string
// API version of the referent
// apiVersion is the API version of the referent
// +optional
APIVersion string
}
// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpec struct {
// ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// should be collected, as well as to actually change the replica count.
ScaleTargetRef CrossVersionObjectReference
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
@ -83,10 +86,12 @@ type HorizontalPodAutoscalerSpec struct {
// available.
// +optional
MinReplicas *int32
// MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// It cannot be less than minReplicas.
MaxReplicas int32
// Metrics contains the specifications for which to use to calculate the
// metrics contains the specifications for which to use to calculate the
// desired replica count (the maximum replica count across all metrics will
// be used). The desired replica count is calculated multiplying the
// ratio between the target value and the current value by the current
@ -487,7 +492,7 @@ type PodsMetricStatus struct {
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ResourceMetricStatus struct {
// Name is the name of the resource in question.
// name is the name of the resource in question.
Name api.ResourceName
Current MetricValueStatus
}
@ -498,7 +503,7 @@ type ResourceMetricStatus struct {
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatus struct {
// Name is the name of the resource in question.
// name is the name of the resource in question.
Name api.ResourceName
Container string
Current MetricValueStatus
@ -530,12 +535,12 @@ type HorizontalPodAutoscaler struct {
// +optional
metav1.ObjectMeta
// Spec is the specification for the behaviour of the autoscaler.
// spec is the specification for the behaviour of the autoscaler.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec HorizontalPodAutoscalerSpec
// Status is the current information about the autoscaler.
// status is the current information about the autoscaler.
// +optional
Status HorizontalPodAutoscalerStatus
}
@ -549,6 +554,6 @@ type HorizontalPodAutoscalerList struct {
// +optional
metav1.ListMeta
// Items is the list of horizontal pod autoscaler objects.
// items is the list of horizontal pod autoscaler objects.
Items []HorizontalPodAutoscaler
}

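Since the autoscaling hunks above only reword the field documentation (spec, scaleTargetRef, minReplicas, maxReplicas, metrics), a small sketch of how those fields fit together may help. It uses the public k8s.io/api/autoscaling/v2 types as a stand-in for the internal package shown in this diff; treat the mapping as an assumption.

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	minReplicas := int32(2)
	targetCPU := int32(70)
	spec := autoscalingv2.HorizontalPodAutoscalerSpec{
		// scaleTargetRef identifies the workload whose replica count is adjusted
		// and whose pods supply the metrics.
		ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Name:       "web",
		},
		MinReplicas: &minReplicas, // lower bound; maxReplicas cannot be less than this
		MaxReplicas: 10,
		// metrics drive the desired replica count; the highest result across all metrics wins.
		Metrics: []autoscalingv2.MetricSpec{{
			Type: autoscalingv2.ResourceMetricSourceType,
			Resource: &autoscalingv2.ResourceMetricSource{
				Name: corev1.ResourceCPU,
				Target: autoscalingv2.MetricTarget{
					Type:               autoscalingv2.UtilizationMetricType,
					AverageUtilization: &targetCPU,
				},
			},
		}},
	}
	fmt.Println(spec.ScaleTargetRef.Kind, spec.MaxReplicas)
}
```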

@ -49,7 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Job{},
&JobList{},
&JobTemplate{},
&CronJob{},
&CronJobList{},
)


@ -22,16 +22,29 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
)
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
//
// Additionally, the apiserver and job controller use this string as a Job
// annotation, to mark Jobs that are being tracked using pod finalizers.
// However, this behavior is deprecated in kubernetes 1.26. This means that, in
// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
// apiserver and job controller will ignore this annotation and they will
// always track jobs using finalizers.
const JobTrackingFinalizer = "batch.kubernetes.io/job-tracking"
const (
// Unprefixed labels are reserved for end-users
// so we will add a batch.kubernetes.io to designate these labels as official Kubernetes labels.
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions
labelPrefix = "batch.kubernetes.io/"
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
//
// Additionally, the apiserver and job controller use this string as a Job
// annotation, to mark Jobs that are being tracked using pod finalizers.
// However, this behavior is deprecated in kubernetes 1.26. This means that, in
// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
// apiserver and job controller will ignore this annotation and they will
// always track jobs using finalizers.
JobTrackingFinalizer = labelPrefix + "job-tracking"
// LegacyJobName and LegacyControllerUid are legacy labels that were set using unprefixed labels.
LegacyJobNameLabel = "job-name"
LegacyControllerUidLabel = "controller-uid"
// JobName is a user friendly way to refer to jobs and is set in the labels for jobs.
JobNameLabel = labelPrefix + LegacyJobNameLabel
// Controller UID is used for selectors and labels for jobs
ControllerUidLabel = labelPrefix + LegacyControllerUidLabel
)
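The new batch.kubernetes.io/ prefix above mainly matters when selecting a Job's pods. A hedged sketch, assuming the same constants are mirrored in the public k8s.io/api/batch/v1 package and that Jobs created on a 1.27 cluster carry both the prefixed and the legacy labels:

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Either selector should match the pods of a Job named "pi" on a cluster
	// that sets both label sets, as the legacy constants above suggest.
	prefixed := labels.SelectorFromSet(labels.Set{batchv1.JobNameLabel: "pi"})
	legacy := labels.SelectorFromSet(labels.Set{batchv1.LegacyJobNameLabel: "pi"})
	fmt.Println(prefixed.String()) // batch.kubernetes.io/job-name=pi
	fmt.Println(legacy.String())   // job-name=pi
}
```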
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -68,22 +81,6 @@ type JobList struct {
Items []Job
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// JobTemplate describes a template for creating copies of a predefined pod.
type JobTemplate struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Defines jobs that will be created from this template.
// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Template JobTemplateSpec
}
// JobTemplateSpec describes the data a Job should have when created from a template
type JobTemplateSpec struct {
// Standard object's metadata of the jobs created from this template.
@ -158,6 +155,7 @@ type PodFailurePolicyOnExitCodesRequirement struct {
// Represents the relationship between the container exit code(s) and the
// specified values. Containers completed with success (exit code 0) are
// excluded from the requirement check. Possible values are:
//
// - In: the requirement is satisfied if at least one container exit code
// (might be multiple if there are multiple containers not restricted
// by the 'containerName' field) is in the set of specified values.
@ -194,6 +192,7 @@ type PodFailurePolicyOnPodConditionsPattern struct {
type PodFailurePolicyRule struct {
// Specifies the action taken on a pod failure when the requirements are satisfied.
// Possible values are:
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
// - Ignore: indicates that the counter towards the .backoffLimit is not
@ -237,7 +236,7 @@ type JobSpec struct {
Parallelism *int32
// Specifies the desired number of successfully finished pods the
// job should be run with. Setting to nil means that the success of any
// job should be run with. Setting to null means that the success of any
// pod signals the success of all pods, and allows parallelism to have any positive
// value. Setting to 1 means that parallelism is limited to 1 and the success of that
// pod signals the success of the job.
@ -293,6 +292,7 @@ type JobSpec struct {
ManualSelector *bool
// Describes the pod that will be created when executing a job.
// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
Template api.PodTemplateSpec
// ttlSecondsAfterFinished limits the lifetime of a Job that has finished
@ -305,7 +305,7 @@ type JobSpec struct {
// +optional
TTLSecondsAfterFinished *int32
// CompletionMode specifies how Pod completions are tracked. It can be
// completionMode specifies how Pod completions are tracked. It can be
// `NonIndexed` (default) or `Indexed`.
//
// `NonIndexed` means that the Job is considered complete when there have
@ -330,7 +330,7 @@ type JobSpec struct {
// +optional
CompletionMode *CompletionMode
// Suspend specifies whether the Job controller should create Pods or not. If
// suspend specifies whether the Job controller should create Pods or not. If
// a Job is created with suspend set to true, no Pods are created by the Job
// controller. If a Job is suspended after creation (i.e. the flag goes from
// false to true), the Job controller will delete all active Pods associated
@ -387,7 +387,7 @@ type JobStatus struct {
// +optional
Failed int32
// CompletedIndexes holds the completed indexes when .spec.completionMode =
// completedIndexes holds the completed indexes when .spec.completionMode =
// "Indexed" in a text format. The indexes are represented as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
@ -397,15 +397,16 @@ type JobStatus struct {
// +optional
CompletedIndexes string
// UncountedTerminatedPods holds the UIDs of Pods that have terminated but
// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
// The job controller creates pods with a finalizer. When a pod terminates
// (succeeded or failed), the controller does three steps to account for it
// in the job status:
// (1) Add the pod UID to the corresponding array in this field.
// (2) Remove the pod finalizer.
// (3) Remove the pod UID from the array while increasing the corresponding
//
// 1. Add the pod UID to the corresponding array in this field.
// 2. Remove the pod finalizer.
// 3. Remove the pod UID from the array while increasing the corresponding
// counter.
//
// Old jobs might not be tracked using this field, in which case the field
@ -417,12 +418,12 @@ type JobStatus struct {
// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
// been accounted in Job status counters.
type UncountedTerminatedPods struct {
// Succeeded holds UIDs of succeeded Pods.
// succeeded holds UIDs of succeeded Pods.
// +listType=set
// +optional
Succeeded []types.UID
// Failed holds UIDs of failed Pods.
// failed holds UIDs of failed Pods.
// +listType=set
// +optional
Failed []types.UID
@ -513,7 +514,6 @@ type CronJobSpec struct {
// configuration, the controller will stop creating new Jobs and will create a system event with the
// reason UnknownTimeZone.
// More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
// This is beta field and must be enabled via the `CronJobTimeZone` feature gate.
// +optional
TimeZone *string
@ -524,6 +524,7 @@ type CronJobSpec struct {
// Specifies how to treat concurrent executions of a Job.
// Valid values are:
//
// - "Allow" (default): allows CronJobs to run concurrently;
// - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
// - "Replace": cancels currently running job and replaces it with a new one

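The pod failure policy actions documented in the hunks above ("FailJob", "Ignore", and friends) are easiest to read as a concrete rule set. A hedged sketch using the public k8s.io/api/batch/v1 and core/v1 mirrors of these types; the exit-code value 42 is purely illustrative.

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	policy := batchv1.PodFailurePolicy{
		Rules: []batchv1.PodFailurePolicyRule{
			{
				// Fail the whole Job when a container exits with a known-fatal code.
				Action: batchv1.PodFailurePolicyActionFailJob,
				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
					Operator: batchv1.PodFailurePolicyOnExitCodesOpIn,
					Values:   []int32{42},
				},
			},
			{
				// Don't count infrastructure disruptions against .spec.backoffLimit.
				Action: batchv1.PodFailurePolicyActionIgnore,
				OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{
					{Type: corev1.DisruptionTarget, Status: corev1.ConditionTrue},
				},
			},
		},
	}
	fmt.Println(len(policy.Rules))
}
```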

@ -347,33 +347,6 @@ func (in *JobStatus) DeepCopy() *JobStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobTemplate) DeepCopyInto(out *JobTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate.
func (in *JobTemplate) DeepCopy() *JobTemplate {
if in == nil {
return nil
}
out := new(JobTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *JobTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) {
*out = *in


@ -122,8 +122,24 @@ const (
// This annotation is beta-level and is only honored when PodDeletionCost feature is enabled.
PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
// AnnotationTopologyAwareHints can be used to enable or disable Topology
// Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any
// other value is treated as "Disabled".
AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
// DeprecatedAnnotationTopologyAwareHints can be used to enable or disable
// Topology Aware Hints for a Service. This may be set to "Auto" or
// "Disabled". Any other value is treated as "Disabled". This annotation has
// been deprecated in favor of the `service.kubernetes.io/topology-mode`
// annotation which also allows "Auto" and "Disabled", but is not limited to
// those (it's open ended to provide room for experimentation while we
// pursue configuration for topology via specification). When both
// `service.kubernetes.io/topology-aware-hints` and
// `service.kubernetes.io/topology-mode` annotations are set, the value of
// `service.kubernetes.io/topology-aware-hints` has precedence.
DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
// AnnotationTopologyMode can be used to enable or disable Topology Aware
// Routing for a Service. Well known values are "Auto" and "Disabled".
// Implementations may choose to develop new topology approaches, exposing
// them with domain-prefixed values. For example, "example.com/lowest-rtt"
// could be a valid implementation-specific value for this annotation. These
// heuristics will often populate topology hints on EndpointSlices, but that
// is not a requirement.
AnnotationTopologyMode = "service.kubernetes.io/topology-mode"
)

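As a small illustration of the annotation swap above, here is how the two keys might look on a plain core/v1 Service; the annotation strings are taken from the hunk, everything else is illustrative.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "web",
			Annotations: map[string]string{
				// Preferred from 1.27 on; values beyond "Auto"/"Disabled" are allowed.
				"service.kubernetes.io/topology-mode": "Auto",
				// Deprecated spelling; if both are set, this one takes precedence.
				"service.kubernetes.io/topology-aware-hints": "Auto",
			},
		},
	}
	fmt.Println(svc.Annotations["service.kubernetes.io/topology-mode"])
}
```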

@ -0,0 +1,102 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// NOTE: DO NOT use those helper functions through client-go, the
// package path will be changed in the future.
package qos
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/apis/core"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return core.PodQOSBestEffort
}
// Check is requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return core.PodQOSGuaranteed
}
return core.PodQOSBurstable
}

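The new helper above derives a pod's QoS class from its containers' CPU and memory requests and limits. A rough usage sketch against the kubectl counterpart mentioned in its comment (k8s.io/kubectl/pkg/util/qos), which operates on the public core/v1 types; treat that import path as an assumption.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubectl/pkg/util/qos"
)

func main() {
	pod := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{{
		Name: "app",
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
			Limits:   corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
		},
	}}}}
	// CPU requests equal CPU limits, but no memory limit is set, so the pod is
	// Burstable rather than Guaranteed.
	fmt.Println(qos.GetPodQOS(pod))
}
```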

@ -682,7 +682,7 @@ type EmptyDirVolumeSource struct {
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
SizeLimit *resource.Quantity
}
@ -1741,7 +1741,6 @@ type CSIPersistentVolumeSource struct {
// ControllerExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerExpandVolume call.
// This is an beta field and requires enabling ExpandCSIVolumes feature gate.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
@ -1750,9 +1749,10 @@ type CSIPersistentVolumeSource struct {
// NodeExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodeExpandVolume call.
// This is an alpha field and requires enabling CSINodeExpandSecret feature gate.
// This is a beta field which is enabled default by CSINodeExpandSecret feature gate.
// This field is optional, may be omitted if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +featureGate=CSINodeExpandSecret
// +optional
NodeExpandSecretRef *SecretReference
}
@ -2037,7 +2037,8 @@ type SecretEnvSource struct {
// HTTPHeader describes a custom header to be used in HTTP probes
type HTTPHeader struct {
// The header field name
// The header field name.
// This will be canonicalized upon output, so case-variant names will be understood as the same header.
Name string
// The header field value
Value string
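The canonicalization note above means case-variant probe header names are treated as the same header on output. As an illustration (assuming the canonical form matches what Go's net/http produces):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Case-variant spellings collapse to one canonical form.
	fmt.Println(http.CanonicalHeaderKey("x-custom-header")) // X-Custom-Header
	fmt.Println(http.CanonicalHeaderKey("X-CUSTOM-HEADER")) // X-Custom-Header
}
```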
@ -2139,6 +2140,33 @@ const (
PullIfNotPresent PullPolicy = "IfNotPresent"
)
// ResourceResizeRestartPolicy specifies how to handle container resource resize.
type ResourceResizeRestartPolicy string
// These are the valid resource resize restart policy values:
const (
// 'NotRequired' means Kubernetes will try to resize the container
// without restarting it, if possible. Kubernetes may however choose to
// restart the container if it is unable to actuate resize without a
// restart. For e.g. the runtime doesn't support restart-free resizing.
NotRequired ResourceResizeRestartPolicy = "NotRequired"
// 'RestartContainer' means Kubernetes will resize the container in-place
// by stopping and starting the container when new resources are applied.
// This is needed for legacy applications. For e.g. java apps using the
// -xmxN flag which are unable to use resized memory without restarting.
RestartContainer ResourceResizeRestartPolicy = "RestartContainer"
)
// ContainerResizePolicy represents resource resize policy for the container.
type ContainerResizePolicy struct {
// Name of the resource to which this resource resize policy applies.
// Supported values: cpu, memory.
ResourceName ResourceName
// Restart policy to apply when specified resource is resized.
// If not specified, it defaults to NotRequired.
RestartPolicy ResourceResizeRestartPolicy
}
// PreemptionPolicy describes a policy for if/when to preempt a pod.
type PreemptionPolicy string
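To make the new resize types above concrete, here is a hedged sketch using the public core/v1 mirrors (corev1.ContainerResizePolicy, corev1.NotRequired, and corev1.RestartContainer are assumed to match the internal names in this diff). The behavior requires the InPlacePodVerticalScaling feature gate.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	container := corev1.Container{
		Name:  "app",
		Image: "nginx",
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("128Mi"),
			},
		},
		ResizePolicy: []corev1.ContainerResizePolicy{
			// CPU can usually be adjusted in place without restarting the container.
			{ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired},
			// Memory changes restart the container, e.g. for JVMs sized via -Xmx.
			{ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.RestartContainer},
		},
	}
	fmt.Println(len(container.ResizePolicy))
}
```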
@ -2191,7 +2219,7 @@ type ResourceRequirements struct {
// This is an alpha field and requires enabling the
// DynamicResourceAllocation feature gate.
//
// This field is immutable.
// This field is immutable. It can only be set for containers.
//
// +featureGate=DynamicResourceAllocation
// +optional
@ -2247,6 +2275,10 @@ type Container struct {
// Compute resource requirements.
// +optional
Resources ResourceRequirements
// Resources resize policy for the container.
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
// +optional
VolumeMounts []VolumeMount
// volumeDevices is the list of block devices to be used by the container.
@ -2296,8 +2328,6 @@ type ProbeHandler struct {
TCPSocket *TCPSocketAction
// GRPC specifies an action involving a GRPC port.
// This is a beta field and requires enabling GRPCContainerProbe feature gate.
// +featureGate=GRPCContainerProbe
// +optional
GRPC *GRPCAction
}
@ -2413,24 +2443,68 @@ type ContainerState struct {
Terminated *ContainerStateTerminated
}
// ContainerStatus represents the status of a container
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
// Each container in a pod must have a unique name.
// Name is a DNS_LABEL representing the unique name of the container.
// Each container in a pod must have a unique name across all container types.
// Cannot be updated.
Name string
// State holds details about the container's current condition.
// +optional
State ContainerState
// LastTerminationState holds the last termination state of the container to
// help debug container crashes and restarts. This field is not
// populated if the container is still running and RestartCount is 0.
// +optional
LastTerminationState ContainerState
// Ready specifies whether the container has passed its readiness check.
// Ready specifies whether the container is currently passing its readiness check.
// The value will change as readiness probes keep executing. If no readiness
// probes are specified, this field defaults to true once the container is
// fully started (see Started field).
//
// The value is typically used to determine whether a container is ready to
// accept traffic.
Ready bool
// Note that this is calculated from dead containers. But those containers are subject to
// garbage collection. This value will get capped at 5 by GC.
// RestartCount holds the number of times the container has been restarted.
// Kubelet makes an effort to always increment the value, but there
// are cases when the state may be lost due to node restarts and then the value
// may be reset to 0. The value is never negative.
RestartCount int32
Image string
ImageID string
// Image is the name of container image that the container is running.
// The container image may not match the image used in the PodSpec,
// as it may have been resolved by the runtime.
// More info: https://kubernetes.io/docs/concepts/containers/images.
Image string
// ImageID is the image ID of the container's image. The image ID may not
// match the image ID of the image used in the PodSpec, as it may have been
// resolved by the runtime.
ImageID string
// ContainerID is the ID of the container in the format '<type>://<container_id>'.
// Where type is a container runtime identifier, returned from Version call of CRI API
// (for example "containerd").
// +optional
ContainerID string
Started *bool
// Started indicates whether the container has finished its postStart lifecycle hook
// and passed its startup probe.
// Initialized as false, becomes true after startupProbe is considered
// successful. Resets to false when the container is restarted, or if kubelet
// loses state temporarily. In both cases, startup probes will run again.
// Is always true when no startupProbe is defined and container is running and
// has passed the postStart lifecycle hook. The null value must be treated the
// same as false.
// +optional
Started *bool
// AllocatedResources represents the compute resources allocated for this container by the
// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
// and after successfully admitting desired pod resize.
// +featureGate=InPlacePodVerticalScaling
// +optional
AllocatedResources ResourceList
// Resources represents the compute resource requests and limits that have been successfully
// enacted on the running container after it has been started or has been successfully resized.
// +featureGate=InPlacePodVerticalScaling
// +optional
Resources *ResourceRequirements
}
// PodPhase is a label for the condition of a pod at the current time.
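Given the expanded ContainerStatus documentation above, a short sketch of how a client might read those fields; the feature-gated AllocatedResources/Resources fields are only populated when InPlacePodVerticalScaling is enabled, and the corev1 field names are assumed to mirror the internal ones shown here.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// summarize prints the fields called out in the ContainerStatus comments above.
func summarize(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		started := cs.Started != nil && *cs.Started
		fmt.Printf("%s ready=%t started=%t restarts=%d image=%s\n",
			cs.Name, cs.Ready, started, cs.RestartCount, cs.Image)
		if cs.Resources != nil {
			// Requests/limits actually enacted on the running container.
			fmt.Printf("  enacted requests: %v\n", cs.Resources.Requests)
		}
	}
	// The pod-level resize status pairs with the per-container fields above.
	if pod.Status.Resize != "" {
		fmt.Println("resize:", pod.Status.Resize)
	}
}

func main() {
	summarize(&corev1.Pod{})
}
```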
@ -2496,6 +2570,20 @@ type PodCondition struct {
Message string
}
// PodResizeStatus shows status of desired resize of a pod's containers.
type PodResizeStatus string
const (
// Pod resources resize has been requested and will be evaluated by node.
PodResizeStatusProposed PodResizeStatus = "Proposed"
// Pod resources resize has been accepted by node and is being actuated.
PodResizeStatusInProgress PodResizeStatus = "InProgress"
// Node cannot resize the pod at this time and will keep retrying.
PodResizeStatusDeferred PodResizeStatus = "Deferred"
// Requested pod resize is not feasible and will not be re-evaluated.
PodResizeStatusInfeasible PodResizeStatus = "Infeasible"
)
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
@ -3045,9 +3133,14 @@ type PodSpec struct {
OS *PodOS
// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
// More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.
// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
// scheduler will not attempt to schedule the pod.
//
// This is an alpha-level feature enabled by PodSchedulingReadiness feature gate.
// SchedulingGates can only be set at pod creation time, and be removed only afterwards.
//
// This is a beta feature enabled by the PodSchedulingReadiness feature gate.
//
// +featureGate=PodSchedulingReadiness
// +optional
SchedulingGates []PodSchedulingGate
// ResourceClaims defines which ResourceClaims must be allocated
@ -3408,6 +3501,10 @@ type EphemeralContainerCommon struct {
// already allocated to the pod.
// +optional
Resources ResourceRequirements
// Resources resize policy for the container.
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// +optional
VolumeMounts []VolumeMount
@ -3524,6 +3621,13 @@ type PodStatus struct {
// Status for any ephemeral containers that have run in this pod.
// +optional
EphemeralContainerStatuses []ContainerStatus
// Status of resources resize desired for pod's containers.
// It is empty if no resources resize is pending.
// Any changes to container resources will automatically set this to "Proposed"
// +featureGate=InPlacePodVerticalScaling
// +optional
Resize PodResizeStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -3617,6 +3721,7 @@ type ReplicationControllerSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. Internally, this takes precedence over a
// TemplateRef.
// The only allowed template.spec.restartPolicy value is "Always".
// +optional
Template *PodTemplateSpec
}
@ -3781,33 +3886,33 @@ const (
ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceInternalTrafficPolicyType describes the endpoint-selection policy for
// ServiceInternalTrafficPolicy describes the endpoint-selection policy for
// traffic sent to the ClusterIP.
type ServiceInternalTrafficPolicyType string
type ServiceInternalTrafficPolicy string
const (
// ServiceInternalTrafficPolicyCluster routes traffic to all endpoints.
ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster"
ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster"
// ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same
// node as the traffic was received on (dropping the traffic if there are no
// local endpoints).
ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local"
ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local"
)
// ServiceExternalTrafficPolicyType describes the endpoint-selection policy for
// ServiceExternalTrafficPolicy describes the endpoint-selection policy for
// traffic to external service entrypoints (NodePorts, ExternalIPs, and
// LoadBalancer IPs).
type ServiceExternalTrafficPolicyType string
type ServiceExternalTrafficPolicy string
const (
// ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints.
ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
// ServiceExternalTrafficPolicyCluster routes traffic to all endpoints.
ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster"
// ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by
// ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by
// routing only to endpoints on the same node as the traffic was received on
// (dropping the traffic if there are no local endpoints).
ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local"
)
// These are the valid conditions of a service.
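The renamed traffic-policy types above keep the same string values ("Cluster" and "Local"); only the Go identifiers change. A minimal sketch with the new names, assuming the same rename is mirrored in k8s.io/api/core/v1:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	internal := corev1.ServiceInternalTrafficPolicyLocal
	spec := corev1.ServiceSpec{
		Type: corev1.ServiceTypeLoadBalancer,
		// Preserve client source IPs by routing only to node-local endpoints.
		ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyLocal,
		InternalTrafficPolicy: &internal,
		Ports:                 []corev1.ServicePort{{Port: 80}},
	}
	fmt.Println(spec.ExternalTrafficPolicy, *spec.InternalTrafficPolicy)
}
```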
@ -4013,7 +4118,7 @@ type ServiceSpec struct {
// a NodePort from within the cluster may need to take traffic policy into account
// when picking a node.
// +optional
ExternalTrafficPolicy ServiceExternalTrafficPolicyType
ExternalTrafficPolicy ServiceExternalTrafficPolicy
// healthCheckNodePort specifies the healthcheck nodePort for the service.
// If not specified, HealthCheckNodePort is created by the service api
@ -4064,7 +4169,7 @@ type ServiceSpec struct {
// "Cluster", uses the standard behavior of routing to all endpoints evenly
// (possibly modified by topology and other features).
// +optional
InternalTrafficPolicy *ServiceInternalTrafficPolicyType
InternalTrafficPolicy *ServiceInternalTrafficPolicy
}
// ServicePort represents the port on which the service is exposed
@ -4079,10 +4184,17 @@ type ServicePort struct {
Protocol Protocol
// The application protocol for this port.
// This is used as a hint for implementations to offer richer behavior for protocols that they understand.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// Valid values are either:
//
// * Un-prefixed protocol names - reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
// +optional
AppProtocol *string
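A short sketch of the appProtocol values described above on a core/v1 ServicePort; the "kubernetes.io/h2c" string comes from the hunk, while the custom protocol name is the illustrative example it mentions.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	h2c := "kubernetes.io/h2c"                    // Kubernetes-defined prefixed name
	custom := "mycompany.com/my-custom-protocol"  // implementation-defined prefixed name
	ports := []corev1.ServicePort{
		{Name: "grpc", Port: 8080, AppProtocol: &h2c},
		{Name: "custom", Port: 9000, AppProtocol: &custom},
	}
	fmt.Println(*ports[0].AppProtocol, *ports[1].AppProtocol)
}
```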
@ -4208,9 +4320,8 @@ type EndpointSubset struct {
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
// The IP of this endpoint.
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, see #4447.
// May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10),
// or link-local multicast (224.0.0.0/24 or ff02::/16).
IP string
// Optional: Hostname of this endpoint
// Meant to be used by DNS servers etc.
@ -5870,8 +5981,12 @@ type TopologySpreadConstraint struct {
// spreading will be calculated. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are ANDed with labelSelector
// to select the group of existing pods over which spreading will be calculated
// for the incoming pod. Keys that don't exist in the incoming pod labels will
// for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
// MatchLabelKeys cannot be set when LabelSelector isn't set.
// Keys that don't exist in the incoming pod labels will
// be ignored. A null or empty list means only match against labelSelector.
//
// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
// +listType=atomic
// +optional
MatchLabelKeys []string

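To illustrate the matchLabelKeys rules above (a key may appear in matchLabelKeys or in the labelSelector, but not both, and matchLabelKeys needs a labelSelector), a hedged core/v1 sketch:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	c := corev1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule,
		// Spread within the app...
		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		// ...but per rollout: the incoming pod's own pod-template-hash value is
		// ANDed with the selector. The key must not also appear in LabelSelector.
		MatchLabelKeys: []string{"pod-template-hash"},
	}
	fmt.Println(c.TopologyKey, c.MatchLabelKeys)
}
```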

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/core"
utilpointer "k8s.io/utils/pointer"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
@ -372,6 +373,11 @@ func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) e
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1)
}
return nil
}
@ -384,6 +390,11 @@ func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) e
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1)
}
return nil
}

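The conversion change above means a negative terminationGracePeriodSeconds on a Pod is normalized to 1 on its way through the apiserver, while PodTemplates are left untouched. A tiny sketch of the observable effect, with the clamp inlined for illustration rather than calling the internal conversion function:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func main() {
	pod := corev1.Pod{Spec: corev1.PodSpec{TerminationGracePeriodSeconds: pointer.Int64(-5)}}
	// Mirror of the Pod-only clamp added in the hunk above.
	if pod.Spec.TerminationGracePeriodSeconds != nil && *pod.Spec.TerminationGracePeriodSeconds < 0 {
		pod.Spec.TerminationGracePeriodSeconds = pointer.Int64(1)
	}
	fmt.Println(*pod.Spec.TerminationGracePeriodSeconds) // 1
}
```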

@ -22,6 +22,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
"k8s.io/utils/pointer"
)
@ -125,7 +127,7 @@ func SetDefaults_Service(obj *v1.Service) {
if (obj.Spec.Type == v1.ServiceTypeNodePort ||
obj.Spec.Type == v1.ServiceTypeLoadBalancer) &&
obj.Spec.ExternalTrafficPolicy == "" {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
}
if obj.Spec.InternalTrafficPolicy == nil {
@ -137,7 +139,7 @@ func SetDefaults_Service(obj *v1.Service) {
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if obj.Spec.AllocateLoadBalancerNodePorts == nil {
obj.Spec.AllocateLoadBalancerNodePorts = pointer.BoolPtr(true)
obj.Spec.AllocateLoadBalancerNodePorts = pointer.Bool(true)
}
}
}
@ -157,6 +159,29 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
obj.Spec.Containers[i].Resources.Requests != nil {
// For normal containers, set resize restart policy to default value (NotRequired), if not specified.
resizePolicySpecified := make(map[v1.ResourceName]bool)
for _, p := range obj.Spec.Containers[i].ResizePolicy {
resizePolicySpecified[p.ResourceName] = true
}
setDefaultResizePolicy := func(resourceName v1.ResourceName) {
if _, found := resizePolicySpecified[resourceName]; !found {
obj.Spec.Containers[i].ResizePolicy = append(obj.Spec.Containers[i].ResizePolicy,
v1.ContainerResizePolicy{
ResourceName: resourceName,
RestartPolicy: v1.NotRequired,
})
}
}
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceCPU]; exists {
setDefaultResizePolicy(v1.ResourceCPU)
}
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceMemory]; exists {
setDefaultResizePolicy(v1.ResourceMemory)
}
}
}
for i := range obj.Spec.InitContainers {
if obj.Spec.InitContainers[i].Resources.Limits != nil {


@ -370,62 +370,3 @@ func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorR
selector = selector.Add(*r)
return selector, nil
}
// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement
// type to a labels.Requirement type.
func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) {
var op selection.Operator
switch nsr.Operator {
case v1.NodeSelectorOpIn:
op = selection.In
case v1.NodeSelectorOpNotIn:
op = selection.NotIn
case v1.NodeSelectorOpExists:
op = selection.Exists
case v1.NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case v1.NodeSelectorOpGt:
op = selection.GreaterThan
case v1.NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator)
}
return labels.NewRequirement(nsr.Key, op, nsr.Values)
}
// NodeSelectorAsSelector converts the NodeSelector api type into a struct that
// implements labels.Selector
// Note: This function should be kept in sync with the selector methods in
// pkg/labels/selector.go
func NodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) {
if ns == nil {
return labels.Nothing(), nil
}
if len(ns.NodeSelectorTerms) == 0 {
return labels.Everything(), nil
}
var requirements []labels.Requirement
for _, nsTerm := range ns.NodeSelectorTerms {
for _, expr := range nsTerm.MatchExpressions {
req, err := nodeSelectorRequirementsAsLabelRequirements(expr)
if err != nil {
return nil, err
}
requirements = append(requirements, *req)
}
for _, field := range nsTerm.MatchFields {
req, err := nodeSelectorRequirementsAsLabelRequirements(field)
if err != nil {
return nil, err
}
requirements = append(requirements, *req)
}
}
selector := labels.NewSelector()
selector = selector.Add(requirements...)
return selector, nil
}


@ -342,6 +342,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ContainerResizePolicy)(nil), (*core.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(a.(*v1.ContainerResizePolicy), b.(*core.ContainerResizePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerResizePolicy)(nil), (*v1.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(a.(*core.ContainerResizePolicy), b.(*v1.ContainerResizePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ContainerState)(nil), (*core.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerState_To_core_ContainerState(a.(*v1.ContainerState), b.(*core.ContainerState), scope)
}); err != nil {
@ -2975,6 +2985,7 @@ func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Cont
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -3008,6 +3019,7 @@ func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Cont
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -3079,6 +3091,28 @@ func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out
return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s)
}
func autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *v1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error {
out.ResourceName = core.ResourceName(in.ResourceName)
out.RestartPolicy = core.ResourceResizeRestartPolicy(in.RestartPolicy)
return nil
}
// Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy is an autogenerated conversion function.
func Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *v1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error {
return autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in, out, s)
}
func autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *v1.ContainerResizePolicy, s conversion.Scope) error {
out.ResourceName = v1.ResourceName(in.ResourceName)
out.RestartPolicy = v1.ResourceResizeRestartPolicy(in.RestartPolicy)
return nil
}
// Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy is an autogenerated conversion function.
func Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *v1.ContainerResizePolicy, s conversion.Scope) error {
return autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in, out, s)
}
func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error {
out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting))
out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running))
@ -3191,6 +3225,8 @@ func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStat
out.ImageID = in.ImageID
out.ContainerID = in.ContainerID
out.Started = (*bool)(unsafe.Pointer(in.Started))
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources))
return nil
}
@ -3213,6 +3249,8 @@ func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerSt
out.ImageID = in.ImageID
out.ContainerID = in.ContainerID
out.Started = (*bool)(unsafe.Pointer(in.Started))
out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*v1.ResourceRequirements)(unsafe.Pointer(in.Resources))
return nil
}
@ -3563,6 +3601,7 @@ func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -3596,6 +3635,7 @@ func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -6380,6 +6420,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS
out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.QOSClass = core.PodQOSClass(in.QOSClass)
out.EphemeralContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = core.PodResizeStatus(in.Resize)
return nil
}
@ -6396,6 +6437,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS
out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses))
out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.EphemeralContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = v1.PodResizeStatus(in.Resize)
return nil
}
@ -7850,7 +7892,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor
out.LoadBalancerIP = in.LoadBalancerIP
out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
out.ExternalName = in.ExternalName
out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy)
out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy)
out.HealthCheckNodePort = in.HealthCheckNodePort
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
@ -7858,7 +7900,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor
out.IPFamilyPolicy = (*core.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy))
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass))
out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicyType)(unsafe.Pointer(in.InternalTrafficPolicy))
out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy))
return nil
}
@ -7881,12 +7923,12 @@ func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v
out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity)
out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy)
out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy)
out.HealthCheckNodePort = in.HealthCheckNodePort
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass))
out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicyType)(unsafe.Pointer(in.InternalTrafficPolicy))
out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy))
return nil
}


@ -48,6 +48,7 @@ func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) })
scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) })
scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) })
scheme.AddTypeDefaultingFunc(&v1.PodStatusResult{}, func(obj interface{}) { SetObjectDefaults_PodStatusResult(obj.(*v1.PodStatusResult)) })
scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) })
scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) })
scheme.AddTypeDefaultingFunc(&v1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*v1.ReplicationController)) })
@ -438,6 +439,30 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
}
}
SetDefaults_ResourceList(&in.Spec.Overhead)
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.ContainerStatuses {
a := &in.Status.ContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.EphemeralContainerStatuses {
a := &in.Status.EphemeralContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
}
func SetObjectDefaults_PodList(in *v1.PodList) {
@ -447,6 +472,33 @@ func SetObjectDefaults_PodList(in *v1.PodList) {
}
}
func SetObjectDefaults_PodStatusResult(in *v1.PodStatusResult) {
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.ContainerStatuses {
a := &in.Status.ContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.EphemeralContainerStatuses {
a := &in.Status.EphemeralContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
}
func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
SetDefaults_PodSpec(&in.Template.Spec)
for i := range in.Template.Spec.Volumes {


@ -26,6 +26,7 @@ import (
"reflect"
"regexp"
"strings"
"sync"
"unicode"
"unicode/utf8"
@ -36,6 +37,7 @@ import (
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
@ -43,9 +45,11 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
kubeletapis "k8s.io/kubelet/pkg/apis"
apiservice "k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/apis/core/helper/qos"
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/capabilities"
@ -295,6 +299,14 @@ var ValidateClassName = apimachineryvalidation.NameIsDNSSubdomain
// class name is valid.
var ValidatePriorityClassName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceClaimName can be used to check whether the given
// name for a ResourceClaim is valid.
var ValidateResourceClaimName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceClaimTemplateName can be used to check whether the given
// name for a ResourceClaimTemplate is valid.
var ValidateResourceClaimTemplateName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateRuntimeClassName can be used to check whether the given RuntimeClass name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
@ -1044,10 +1056,7 @@ func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *fi
allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
}
} else if file.ResourceFieldRef != nil {
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixes
if opts.AllowDownwardAPIHugePages {
localValidContainerResourceFieldPathPrefixes = validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
}
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), true)...)
} else {
allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
@ -1531,14 +1540,12 @@ func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistent
// validatePVSecretReference checks whether the provided SecretReference object is valid in terms of secret name and namespace.
func validatePVSecretReference(secretRef *core.SecretReference, allowDNSSubDomainSecretName bool, fldPath *field.Path) field.ErrorList {
func validatePVSecretReference(secretRef *core.SecretReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if len(secretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if allowDNSSubDomainSecretName {
allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...)
} else {
allErrs = append(allErrs, ValidateDNS1123Label(secretRef.Name, fldPath.Child("name"))...)
allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...)
}
if len(secretRef.Namespace) == 0 {
@ -1567,7 +1574,7 @@ func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorLi
return allErrs
}
func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, allowDNSSubDomainSecretName bool, fldPath *field.Path) field.ErrorList {
func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
@ -1576,16 +1583,16 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, allo
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
if csi.ControllerPublishSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, allowDNSSubDomainSecretName, fldPath.Child("controllerPublishSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, fldPath.Child("controllerPublishSecretRef"))...)
}
if csi.ControllerExpandSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, allowDNSSubDomainSecretName, fldPath.Child("controllerExpandSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, fldPath.Child("controllerExpandSecretRef"))...)
}
if csi.NodePublishSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, allowDNSSubDomainSecretName, fldPath.Child("nodePublishSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, fldPath.Child("nodePublishSecretRef"))...)
}
if csi.NodeExpandSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, allowDNSSubDomainSecretName, fldPath.Child("nodeExpandSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, fldPath.Child("nodeExpandSecretRef"))...)
}
return allErrs
}
@ -1647,8 +1654,6 @@ var allowedTemplateObjectMetaFields = map[string]bool{
type PersistentVolumeSpecValidationOptions struct {
// Allow spec to contain the "ReadWriteOncePod" access mode
AllowReadWriteOncePod bool
// Allow the secretRef Name field to be of DNSSubDomain Format
AllowDNSSubDomainSecretName bool
}
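With AllowDNSSubDomainSecretName removed, CSI secret reference names are now always validated as DNS-1123 subdomains instead of labels. A quick sketch of the practical difference, using the apimachinery validators (a dotted name passes subdomain validation but fails label validation):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	name := "team.storage-secret" // dots are allowed in subdomains but not in labels
	fmt.Println("label errors:    ", validation.IsDNS1123Label(name))
	fmt.Println("subdomain errors:", validation.IsDNS1123Subdomain(name)) // empty: the name is accepted
}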
// ValidatePersistentVolumeName checks that a name is appropriate for a
@ -1663,8 +1668,7 @@ var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), st
func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions {
opts := PersistentVolumeSpecValidationOptions{
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
AllowDNSSubDomainSecretName: false,
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
}
if oldPv == nil {
// If there's no old PV, use the options based solely on feature enablement
@ -1674,21 +1678,9 @@ func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) Pers
// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
opts.AllowReadWriteOncePod = true
}
if oldCSI := oldPv.Spec.CSI; oldCSI != nil {
opts.AllowDNSSubDomainSecretName =
secretRefRequiresSubdomainSecretName(oldCSI.ControllerExpandSecretRef) ||
secretRefRequiresSubdomainSecretName(oldCSI.ControllerPublishSecretRef) ||
secretRefRequiresSubdomainSecretName(oldCSI.NodeStageSecretRef) ||
secretRefRequiresSubdomainSecretName(oldCSI.NodePublishSecretRef)
}
return opts
}
func secretRefRequiresSubdomainSecretName(secretRef *core.SecretReference) bool {
// ref and name were specified and name didn't fit within label validation
return secretRef != nil && len(secretRef.Name) > 0 && len(validation.IsDNS1123Label(secretRef.Name)) > 0
}
func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName string, validateInlinePersistentVolumeSpec bool, fldPath *field.Path, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
@ -1943,7 +1935,7 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri
allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, opts.AllowDNSSubDomainSecretName, fldPath.Child("csi"))...)
allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, fldPath.Child("csi"))...)
}
}
@ -2007,7 +1999,7 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume, opts Pe
// Allow setting NodeAffinity if oldPv NodeAffinity was not set
if oldPv.Spec.NodeAffinity != nil {
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
allErrs = append(allErrs, validatePvNodeAffinity(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
}
return allErrs
@ -2421,8 +2413,6 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString(
var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage")
// NOTE: this is only valid with DownwardAPIHugePages enabled
var validContainerResourceFieldPathPrefixes = sets.NewString()
var validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages = sets.NewString(hugepagesRequestsPrefixDownwardAPI, hugepagesLimitsPrefixDownwardAPI)
const hugepagesRequestsPrefixDownwardAPI string = `requests.hugepages-`
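With the AllowDownwardAPIHugePages option gone, the hugepages request/limit prefixes are always accepted for resourceFieldRef selectors. Purely as an illustration of the shape that is now valid unconditionally (built with the external k8s.io/api/core/v1 types):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Expose a container's 2Mi hugepages request to the container via the downward API.
	env := v1.EnvVar{
		Name: "HUGEPAGES_2MI_REQUEST",
		ValueFrom: &v1.EnvVarSource{
			ResourceFieldRef: &v1.ResourceFieldSelector{
				ContainerName: "app",
				Resource:      "requests.hugepages-2Mi",
				Divisor:       resource.MustParse("1Mi"),
			},
		},
	}
	fmt.Println(env.ValueFrom.ResourceFieldRef.Resource)
}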
@ -2443,10 +2433,7 @@ func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValida
}
if ev.ValueFrom.ResourceFieldRef != nil {
numSources++
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixes
if opts.AllowDownwardAPIHugePages {
localValidContainerResourceFieldPathPrefixes = validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
}
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), false)...)
}
if ev.ValueFrom.ConfigMapKeyRef != nil {
@ -2768,11 +2755,11 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str
return allErrs
}
func validatePodResourceClaims(claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
func validatePodResourceClaims(podMeta *metav1.ObjectMeta, claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
podClaimNames := sets.NewString()
for i, claim := range claims {
allErrs = append(allErrs, validatePodResourceClaim(claim, &podClaimNames, fldPath.Index(i))...)
allErrs = append(allErrs, validatePodResourceClaim(podMeta, claim, &podClaimNames, fldPath.Index(i))...)
}
return allErrs
}
@ -2790,14 +2777,22 @@ func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String {
return podClaimNames
}
func validatePodResourceClaim(claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claim.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
nameErrs := ValidateDNS1123Label(claim.Name, fldPath.Child("name"))
if len(nameErrs) > 0 {
allErrs = append(allErrs, nameErrs...)
} else if podMeta != nil && claim.Source.ResourceClaimTemplateName != nil {
claimName := podMeta.Name + "-" + claim.Name
for _, detail := range ValidateResourceClaimName(claimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), claimName, "final ResourceClaim name: "+detail))
}
}
podClaimNames.Insert(claim.Name)
}
allErrs = append(allErrs, validatePodResourceClaimSource(claim.Source, fldPath.Child("source"))...)
@ -2813,6 +2808,16 @@ func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field
if claimSource.ResourceClaimName == nil && claimSource.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
if claimSource.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*claimSource.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimName"), *claimSource.ResourceClaimName, detail))
}
}
if claimSource.ResourceClaimTemplateName != nil {
for _, detail := range ValidateResourceClaimTemplateName(*claimSource.ResourceClaimTemplateName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimTemplateName"), *claimSource.ResourceClaimTemplateName, detail))
}
}
return allErrs
}
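The checks above mean both the referenced claim/template names and, when a template is used, the generated per-pod claim name (pod name + "-" + claim name) must be valid DNS subdomains. A quick way to sanity-check a generated name with the same validator family wired up earlier in this file (a sketch, not the exact call path):

package main

import (
	"fmt"

	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
)

func main() {
	podName, claimName := "data-pipeline-0", "scratch"
	generated := podName + "-" + claimName // name of the ResourceClaim created from the template
	if errs := apimachineryvalidation.NameIsDNSSubdomain(generated, false); len(errs) > 0 {
		fmt.Println("invalid generated claim name:", errs)
		return
	}
	fmt.Println("ok:", generated)
}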
@ -3036,6 +3041,37 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error
return allErrors
}
var supportedResizeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
var supportedResizePolicies = sets.NewString(string(core.NotRequired), string(core.RestartContainer))
func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
// validate that resource names are not repeated, and that only supported resource names and policy values are specified
resources := make(map[core.ResourceName]bool)
for i, p := range policyList {
if _, found := resources[p.ResourceName]; found {
allErrors = append(allErrors, field.Duplicate(fldPath.Index(i), p.ResourceName))
}
resources[p.ResourceName] = true
switch p.ResourceName {
case core.ResourceCPU, core.ResourceMemory:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, p.ResourceName, supportedResizeResources.List()))
}
switch p.RestartPolicy {
case core.NotRequired, core.RestartContainer:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, p.RestartPolicy, supportedResizePolicies.List()))
}
}
return allErrors
}
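For reference, a resize policy that passes validateResizePolicy might look like the following, using the external k8s.io/api/core/v1 types (the feature is alpha in 1.27 behind InPlacePodVerticalScaling):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	policies := []v1.ContainerResizePolicy{
		// CPU can be resized in place without restarting the container.
		{ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired},
		// Memory changes require a container restart.
		{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer},
	}
	for _, p := range policies {
		fmt.Printf("%s -> %s\n", p.ResourceName, p.RestartPolicy)
	}
}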
// validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers.
// Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates.
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
@ -3158,6 +3194,9 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
if ctr.StartupProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers"))
}
if len(ctr.ResizePolicy) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("resizePolicy"), ctr.ResizePolicy, "must not be set for init containers"))
}
}
return allErrs
@ -3203,6 +3242,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"))...)
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...)
return allErrs
}
@ -3382,9 +3422,7 @@ func validateSchedulingGates(schedulingGates []core.PodSchedulingGate, fldPath *
// There should be no duplicates in the list of scheduling gates.
seen := sets.String{}
for i, schedulingGate := range schedulingGates {
if schedulingGate.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), "must not be empty"))
}
allErrs = append(allErrs, ValidateQualifiedName(schedulingGate.Name, fldPath.Index(i))...)
if seen.Has(schedulingGate.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), schedulingGate.Name))
}
@ -3644,8 +3682,6 @@ func validateContainerOnlyForPod(ctr *core.Container, path *field.Path) field.Er
// PodValidationOptions contains the different settings for pod validation
type PodValidationOptions struct {
// Allow pod spec to use hugepages in downward API
AllowDownwardAPIHugePages bool
// Allow invalid pod-deletion-cost annotation value for backward compatibility.
AllowInvalidPodDeletionCost bool
// Allow invalid label-value in LabelSelector
@ -3654,6 +3690,10 @@ type PodValidationOptions struct {
AllowIndivisibleHugePagesValues bool
// Allow more DNSSearchPaths and longer DNSSearchListChars
AllowExpandedDNSConfig bool
// Allow invalid topologySpreadConstraint labelSelector for backward compatibility
AllowInvalidTopologySpreadConstraintLabelSelector bool
// Allow node selector additions for gated pods.
AllowMutableNodeSelectorAndNodeAffinity bool
}
// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
@ -3746,7 +3786,7 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
allErrs = append(allErrs, vErrs...)
podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims)
allErrs = append(allErrs, validatePodResourceClaims(spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validatePodResourceClaims(podMeta, spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...)
@ -3759,7 +3799,7 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...)
allErrs = append(allErrs, validateSchedulingGates(spec.SchedulingGates, fldPath.Child("schedulingGates"))...)
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"))...)
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"), opts)...)
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath)...)
allErrs = append(allErrs, validateHostUsers(spec, fldPath)...)
if len(spec.ServiceAccountName) > 0 {
@ -4506,6 +4546,24 @@ func validateSeccompAnnotationsAndFieldsMatch(annotationValue string, seccompFie
return nil
}
var updatablePodSpecFields = []string{
"`spec.containers[*].image`",
"`spec.initContainers[*].image`",
"`spec.activeDeadlineSeconds`",
"`spec.tolerations` (only additions to existing tolerations)",
"`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)",
"`spec.containers[*].resources` (for CPU/memory only)",
}
// TODO(vinaykul,InPlacePodVerticalScaling): Drop this var once InPlacePodVerticalScaling goes GA and featuregate is gone.
var updatablePodSpecFieldsNoResources = []string{
"`spec.containers[*].image`",
"`spec.initContainers[*].image`",
"`spec.activeDeadlineSeconds`",
"`spec.tolerations` (only additions to existing tolerations)",
"`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)",
}
// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
@ -4565,12 +4623,56 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
return allErrs
}
//TODO(vinaykul,InPlacePodVerticalScaling): With KEP 2527, we can rely on persistence of PodStatus.QOSClass
// We can use PodStatus.QOSClass instead of GetPodQOS here, in kubelet, and elsewhere, as PodStatus.QOSClass
// does not change once it is bootstrapped in podCreate. This needs to be addressed before beta as a
// separate PR covering all uses of GetPodQOS. With that change, we can drop the below block.
// Ref: https://github.com/kubernetes/kubernetes/pull/102884#discussion_r1093790446
// Ref: https://github.com/kubernetes/kubernetes/pull/102884/#discussion_r663280487
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// reject attempts to change pod qos
oldQoS := qos.GetPodQOS(oldPod)
newQoS := qos.GetPodQOS(newPod)
if newQoS != oldQoS {
allErrs = append(allErrs, field.Invalid(fldPath, newQoS, "Pod QoS is immutable"))
}
}
// handle updateable fields by munging those fields prior to deep equal comparison.
mungedPodSpec := *newPod.Spec.DeepCopy()
// munge spec.containers[*].image
var newContainers []core.Container
for ix, container := range mungedPodSpec.Containers {
container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone
// When the feature-gate is turned off, any new requests attempting to update CPU or memory
// resource values will result in validation failure.
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// Resources are mutable for CPU & memory only
// - user can now modify Resources to express new desired Resources
mungeCpuMemResources := func(resourceList, oldResourceList core.ResourceList) core.ResourceList {
if oldResourceList == nil {
return nil
}
var mungedResourceList core.ResourceList
if resourceList == nil {
mungedResourceList = make(core.ResourceList)
} else {
mungedResourceList = resourceList.DeepCopy()
}
delete(mungedResourceList, core.ResourceCPU)
delete(mungedResourceList, core.ResourceMemory)
if cpu, found := oldResourceList[core.ResourceCPU]; found {
mungedResourceList[core.ResourceCPU] = cpu
}
if mem, found := oldResourceList[core.ResourceMemory]; found {
mungedResourceList[core.ResourceMemory] = mem
}
return mungedResourceList
}
lim := mungeCpuMemResources(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits)
req := mungeCpuMemResources(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests)
container.Resources = core.ResourceRequirements{Limits: lim, Requests: req}
}
newContainers = append(newContainers, container)
}
mungedPodSpec.Containers = newContainers
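A standalone sketch of the munging idea above, written against the external k8s.io/api/core/v1 types rather than the internal ones: CPU and memory in the new spec are reset to the old values before the deep-equal comparison, so only those two resources are effectively mutable.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// mungeCPUMemory returns newList with its CPU and memory entries replaced by the
// old values, so CPU/memory-only changes disappear from a later comparison.
func mungeCPUMemory(newList, oldList v1.ResourceList) v1.ResourceList {
	if oldList == nil {
		return nil
	}
	out := v1.ResourceList{}
	if newList != nil {
		out = newList.DeepCopy()
	}
	delete(out, v1.ResourceCPU)
	delete(out, v1.ResourceMemory)
	if cpu, ok := oldList[v1.ResourceCPU]; ok {
		out[v1.ResourceCPU] = cpu
	}
	if mem, ok := oldList[v1.ResourceMemory]; ok {
		out[v1.ResourceMemory] = mem
	}
	return out
}

func main() {
	oldReq := v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}
	newReq := v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")}
	munged := mungeCPUMemory(newReq, oldReq)
	fmt.Println(munged.Cpu().String()) // "100m": the CPU change is masked before comparison
}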
@ -4598,13 +4700,49 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
mungedPodSpec.TerminationGracePeriodSeconds = oldPod.Spec.TerminationGracePeriodSeconds // +k8s:verify-mutation:reason=clone
}
// Handle validations specific to gated pods.
podIsGated := len(oldPod.Spec.SchedulingGates) > 0
if opts.AllowMutableNodeSelectorAndNodeAffinity && podIsGated {
// Additions to spec.nodeSelector are allowed (no deletions or mutations) for gated pods.
if !apiequality.Semantic.DeepEqual(mungedPodSpec.NodeSelector, oldPod.Spec.NodeSelector) {
allErrs = append(allErrs, validateNodeSelectorMutation(specPath.Child("nodeSelector"), mungedPodSpec.NodeSelector, oldPod.Spec.NodeSelector)...)
mungedPodSpec.NodeSelector = oldPod.Spec.NodeSelector // +k8s:verify-mutation:reason=clone
}
// Validate node affinity mutations.
var oldNodeAffinity *core.NodeAffinity
if oldPod.Spec.Affinity != nil {
oldNodeAffinity = oldPod.Spec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
var mungedNodeAffinity *core.NodeAffinity
if mungedPodSpec.Affinity != nil {
mungedNodeAffinity = mungedPodSpec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
if !apiequality.Semantic.DeepEqual(oldNodeAffinity, mungedNodeAffinity) {
allErrs = append(allErrs, validateNodeAffinityMutation(specPath.Child("affinity").Child("nodeAffinity"), mungedNodeAffinity, oldNodeAffinity)...)
switch {
case mungedPodSpec.Affinity == nil && oldNodeAffinity == nil:
// already effectively nil, no change needed
case mungedPodSpec.Affinity == nil && oldNodeAffinity != nil:
mungedPodSpec.Affinity = &core.Affinity{NodeAffinity: oldNodeAffinity} // +k8s:verify-mutation:reason=clone
default:
mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone
}
}
}
if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) {
// This diff isn't perfect, but it's a helluva lot better than "I'm not going to tell you what the difference is".
// TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
specDiff := cmp.Diff(oldPod.Spec, mungedPodSpec)
allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds`, `spec.tolerations` (only additions to existing tolerations) or `spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)\n%v", specDiff)))
errs := field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFieldsNoResources, ","), specDiff))
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
errs = field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFields, ","), specDiff))
}
allErrs = append(allErrs, errs)
}
return allErrs
}
@ -4687,6 +4825,11 @@ func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodVali
allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
// static pods don't support ephemeral containers #113935
if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok {
return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods do not support ephemeral containers")}
}
// Part 2: Validate that the changes between oldPod.Spec.EphemeralContainers and
// newPod.Spec.EphemeralContainers are allowed.
//
@ -4743,14 +4886,23 @@ var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityCli
var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort),
string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName))
var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyTypeLocal))
var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyLocal))
var supportedServiceIPFamily = sets.NewString(string(core.IPv4Protocol), string(core.IPv6Protocol))
var supportedServiceIPFamilyPolicy = sets.NewString(string(core.IPFamilyPolicySingleStack), string(core.IPFamilyPolicyPreferDualStack), string(core.IPFamilyPolicyRequireDualStack))
// ValidateService tests if required fields/annotations of a Service are valid.
func ValidateService(service *core.Service) field.ErrorList {
allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata"))
metaPath := field.NewPath("metadata")
allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, metaPath)
topologyHintsVal, topologyHintsSet := service.Annotations[core.DeprecatedAnnotationTopologyAwareHints]
topologyModeVal, topologyModeSet := service.Annotations[core.AnnotationTopologyMode]
if topologyModeSet && topologyHintsSet && topologyModeVal != topologyHintsVal {
message := fmt.Sprintf("must match annotations[%s] when both are specified", core.DeprecatedAnnotationTopologyAwareHints)
allErrs = append(allErrs, field.Invalid(metaPath.Child("annotations").Key(core.AnnotationTopologyMode), topologyModeVal, message))
}
specPath := field.NewPath("spec")
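In practice the new check rejects a Service that carries both topology annotations with different values. Assuming the usual annotation keys ("service.kubernetes.io/topology-mode" and the deprecated "service.kubernetes.io/topology-aware-hints"), a conflicting object would look like:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "web",
			Annotations: map[string]string{
				// Assumed annotation keys; the values disagree, so validation fails.
				"service.kubernetes.io/topology-aware-hints": "Auto",
				"service.kubernetes.io/topology-mode":        "Disabled",
			},
		},
	}
	fmt.Println(svc.Annotations)
}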
@ -4975,8 +5127,8 @@ func needsExternalTrafficPolicy(svc *core.Service) bool {
}
var validExternalTrafficPolicies = sets.NewString(
string(core.ServiceExternalTrafficPolicyTypeCluster),
string(core.ServiceExternalTrafficPolicyTypeLocal))
string(core.ServiceExternalTrafficPolicyCluster),
string(core.ServiceExternalTrafficPolicyLocal))
func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
@ -5811,7 +5963,6 @@ func ValidateSecret(secret *core.Secret) field.ErrorList {
if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), ""))
}
// TODO: Verify that the key matches the cert.
default:
// no-op
}
@ -5946,9 +6097,9 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl
if exists {
// For non overcommitable resources, requests can neither exceed nor be lower than limits, i.e. they must be equal.
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit of %s", resourceName, limitQuantity.String())))
} else if quantity.Cmp(limitQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName)))
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit of %s", resourceName, limitQuantity.String())))
}
} else if !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
@ -6741,7 +6892,7 @@ var (
)
// validateTopologySpreadConstraints validates given TopologySpreadConstraints.
func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path) field.ErrorList {
func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
for i, constraint := range constraints {
@ -6767,6 +6918,9 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai
allErrs = append(allErrs, err)
}
allErrs = append(allErrs, validateMatchLabelKeys(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...)
if !opts.AllowInvalidTopologySpreadConstraintLabelSelector {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, subFldPath.Child("labelSelector"))...)
}
}
return allErrs
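The added ValidateLabelSelector call means new pods can no longer carry invalid label values in topologySpreadConstraints[*].labelSelector; existing pods keep passing via AllowInvalidTopologySpreadConstraintLabelSelector. A small sketch of the same check using the apimachinery validator the code above relies on:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "zone",
			Operator: metav1.LabelSelectorOpIn,
			Values:   []string{"not a valid value!"}, // spaces and '!' are invalid in label values
		}},
	}
	errs := unversionedvalidation.ValidateLabelSelector(
		sel,
		unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false},
		field.NewPath("spec", "topologySpreadConstraints").Index(0).Child("labelSelector"),
	)
	fmt.Println(errs) // non-empty: the invalid value is rejected for new pods
}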
@ -6846,7 +7000,9 @@ func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelS
return nil
}
var allErrs field.ErrorList
labelSelectorKeys := sets.String{}
if labelSelector != nil {
for key := range labelSelector.MatchLabels {
labelSelectorKeys.Insert(key)
@ -6854,9 +7010,10 @@ func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelS
for _, matchExpression := range labelSelector.MatchExpressions {
labelSelectorKeys.Insert(matchExpression.Key)
}
} else {
allErrs = append(allErrs, field.Forbidden(fldPath, "must not be specified when labelSelector is not set"))
}
allErrs := field.ErrorList{}
for i, key := range matchLabelKeys {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(key, fldPath.Index(i))...)
if labelSelectorKeys.Has(key) {
@ -7172,3 +7329,115 @@ func ValidatePodAffinityTermSelector(podAffinityTerm core.PodAffinityTerm, allow
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, labelSelectorValidationOptions, fldPath.Child("namespaceSelector"))...)
return allErrs
}
var betaToGALabel = map[string]string{
v1.LabelFailureDomainBetaZone: v1.LabelTopologyZone,
v1.LabelFailureDomainBetaRegion: v1.LabelTopologyRegion,
kubeletapis.LabelOS: v1.LabelOSStable,
kubeletapis.LabelArch: v1.LabelArchStable,
v1.LabelInstanceType: v1.LabelInstanceTypeStable,
}
var (
maskNodeSelectorLabelChangeEqualities conversion.Equalities
initMaskNodeSelectorLabelChangeEqualities sync.Once
)
func getMaskNodeSelectorLabelChangeEqualities() conversion.Equalities {
initMaskNodeSelectorLabelChangeEqualities.Do(func() {
var eqs = apiequality.Semantic.Copy()
err := eqs.AddFunc(
func(newReq, oldReq core.NodeSelectorRequirement) bool {
// allow newReq to change to a GA key
if oldReq.Key != newReq.Key && betaToGALabel[oldReq.Key] == newReq.Key {
oldReq.Key = newReq.Key // +k8s:verify-mutation:reason=clone
}
return apiequality.Semantic.DeepEqual(newReq, oldReq)
},
)
if err != nil {
panic(fmt.Errorf("failed to instantiate semantic equalities: %w", err))
}
maskNodeSelectorLabelChangeEqualities = eqs
})
return maskNodeSelectorLabelChangeEqualities
}
func validatePvNodeAffinity(newPvNodeAffinity, oldPvNodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if !getMaskNodeSelectorLabelChangeEqualities().DeepEqual(newPvNodeAffinity, oldPvNodeAffinity) {
allErrs = append(allErrs, field.Invalid(fldPath, newPvNodeAffinity, fieldImmutableErrorMsg+", except for updating from beta label to GA"))
}
return allErrs
}
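The masked equality above is what lets a PV's otherwise-immutable nodeAffinity be updated purely to migrate a deprecated beta label key to its GA equivalent. For example (zone keys shown for illustration), the following old/new requirement pair is treated as equal because only the masked key differs:

package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
)

func main() {
	oldReq := v1.NodeSelectorRequirement{
		Key:      "failure-domain.beta.kubernetes.io/zone", // deprecated beta key
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"us-east-1a"},
	}
	newReq := v1.NodeSelectorRequirement{
		Key:      "topology.kubernetes.io/zone", // GA key
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"us-east-1a"},
	}
	maskedOld := oldReq
	maskedOld.Key = newReq.Key // the beta->GA key change is forgiven before comparing
	fmt.Println("treated as equal:", reflect.DeepEqual(maskedOld, newReq))
}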
func validateNodeSelectorMutation(fldPath *field.Path, newNodeSelector, oldNodeSelector map[string]string) field.ErrorList {
var allErrs field.ErrorList
// Validate no existing node selectors were deleted or mutated.
for k, v1 := range oldNodeSelector {
if v2, ok := newNodeSelector[k]; !ok || v1 != v2 {
allErrs = append(allErrs, field.Invalid(fldPath, newNodeSelector, "only additions to spec.nodeSelector are allowed (no mutations or deletions)"))
return allErrs
}
}
return allErrs
}
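Concretely, for a gated pod the only nodeSelector change that survives this check is a pure addition. A quick sketch of allowed versus rejected updates:

package main

import "fmt"

// onlyAdditions reports whether newSel keeps every old key/value pair unchanged.
func onlyAdditions(newSel, oldSel map[string]string) bool {
	for k, v := range oldSel {
		if nv, ok := newSel[k]; !ok || nv != v {
			return false
		}
	}
	return true
}

func main() {
	old := map[string]string{"disktype": "ssd"}
	fmt.Println(onlyAdditions(map[string]string{"disktype": "ssd", "zone": "us-east-1a"}, old)) // true: addition
	fmt.Println(onlyAdditions(map[string]string{"disktype": "hdd"}, old))                       // false: mutation
	fmt.Println(onlyAdditions(map[string]string{}, old))                                        // false: deletion
}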
func validateNodeAffinityMutation(nodeAffinityPath *field.Path, newNodeAffinity, oldNodeAffinity *core.NodeAffinity) field.ErrorList {
var allErrs field.ErrorList
// If old node affinity was nil, anything can be set.
if oldNodeAffinity == nil || oldNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return allErrs
}
oldTerms := oldNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
var newTerms []core.NodeSelectorTerm
if newNodeAffinity != nil && newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
newTerms = newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
}
// If there are no old terms, we can set the new terms to anything.
// If there are old terms, we cannot add any new ones.
if len(oldTerms) > 0 && len(oldTerms) != len(newTerms) {
return append(allErrs, field.Invalid(nodeAffinityPath.Child("requiredDuringSchedulingIgnoredDuringExecution").Child("nodeSelectorTerms"), newTerms, "no additions/deletions to non-empty NodeSelectorTerms list are allowed"))
}
// For requiredDuringSchedulingIgnoredDuringExecution, if old NodeSelectorTerms
// was empty, anything can be set. If non-empty, only additions of NodeSelectorRequirements
// to matchExpressions or fieldExpressions are allowed.
for i := range oldTerms {
if !validateNodeSelectorTermHasOnlyAdditions(newTerms[i], oldTerms[i]) {
allErrs = append(allErrs, field.Invalid(nodeAffinityPath.Child("requiredDuringSchedulingIgnoredDuringExecution").Child("nodeSelectorTerms").Index(i), newTerms[i], "only additions are allowed (no mutations or deletions)"))
}
}
return allErrs
}
func validateNodeSelectorTermHasOnlyAdditions(newTerm, oldTerm core.NodeSelectorTerm) bool {
if len(oldTerm.MatchExpressions) == 0 && len(oldTerm.MatchFields) == 0 {
if len(newTerm.MatchExpressions) > 0 || len(newTerm.MatchFields) > 0 {
return false
}
}
// Validate MatchExpressions only has additions (no deletions or mutations)
if l := len(oldTerm.MatchExpressions); l > 0 {
if len(newTerm.MatchExpressions) < l {
return false
}
if !apiequality.Semantic.DeepEqual(newTerm.MatchExpressions[:l], oldTerm.MatchExpressions) {
return false
}
}
// Validate MatchFields only has additions (no deletions or mutations)
if l := len(oldTerm.MatchFields); l > 0 {
if len(newTerm.MatchFields) < l {
return false
}
if !apiequality.Semantic.DeepEqual(newTerm.MatchFields[:l], oldTerm.MatchFields) {
return false
}
}
return true
}
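The prefix comparison above means a gated pod may append new matchExpressions or matchFields to an existing term, but may not reorder, rewrite, or drop the ones already there. A minimal illustration of the prefix rule on matchExpressions (using reflect.DeepEqual in place of the semantic equality):

package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
)

// hasOnlyAdditions mirrors the rule for one term's matchExpressions: the old
// expressions must appear unchanged as a prefix of the new ones.
func hasOnlyAdditions(newExprs, oldExprs []v1.NodeSelectorRequirement) bool {
	if len(newExprs) < len(oldExprs) {
		return false
	}
	return reflect.DeepEqual(newExprs[:len(oldExprs)], oldExprs)
}

func main() {
	oldExprs := []v1.NodeSelectorRequirement{
		{Key: "topology.kubernetes.io/zone", Operator: v1.NodeSelectorOpIn, Values: []string{"us-east-1a"}},
	}
	appended := append(append([]v1.NodeSelectorRequirement{}, oldExprs...),
		v1.NodeSelectorRequirement{Key: "disktype", Operator: v1.NodeSelectorOpExists})
	fmt.Println(hasOnlyAdditions(appended, oldExprs)) // true: old prefix preserved
	fmt.Println(hasOnlyAdditions(nil, oldExprs))      // false: deletion
}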

View File

@ -788,6 +788,11 @@ func (in *Container) DeepCopyInto(out *Container) {
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.ResizePolicy != nil {
in, out := &in.ResizePolicy, &out.ResizePolicy
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -875,6 +880,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy.
func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
if in == nil {
return nil
}
out := new(ContainerResizePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerState) DeepCopyInto(out *ContainerState) {
*out = *in
@ -967,6 +988,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
*out = new(bool)
**out = **in
}
if in.AllocatedResources != nil {
in, out := &in.AllocatedResources, &out.AllocatedResources
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
return
}
@ -1382,6 +1415,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.ResizePolicy != nil {
in, out := &in.ResizePolicy, &out.ResizePolicy
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -5502,7 +5540,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
}
if in.InternalTrafficPolicy != nil {
in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy
*out = new(ServiceInternalTrafficPolicyType)
*out = new(ServiceInternalTrafficPolicy)
**out = **in
}
return

View File

@ -22,7 +22,6 @@ import (
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/apis/policy"
)
// GroupName is the group name use in this package
@ -60,8 +59,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&networking.IngressList{},
&apps.ReplicaSet{},
&apps.ReplicaSetList{},
&policy.PodSecurityPolicy{},
&policy.PodSecurityPolicyList{},
&autoscaling.Scale{},
&networking.NetworkPolicy{},
&networking.NetworkPolicyList{},

View File

@ -54,6 +54,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&IngressClassList{},
&ClusterCIDR{},
&ClusterCIDRList{},
&IPAddress{},
&IPAddressList{},
)
return nil
}

View File

@ -18,23 +18,25 @@ package networking
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy describes what network traffic is allowed for a set of Pods
// NetworkPolicy describes what network traffic is allowed for a set of pods
type NetworkPolicy struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Specification of the desired behavior for this NetworkPolicy.
// spec represents the specification of the desired behavior for this NetworkPolicy.
// +optional
Spec NetworkPolicySpec
// Status is the current state of the NetworkPolicy.
// status represents the current state of the NetworkPolicy.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status NetworkPolicyStatus
@ -53,16 +55,16 @@ const (
// NetworkPolicySpec provides the specification of a NetworkPolicy
type NetworkPolicySpec struct {
// Selects the pods to which this NetworkPolicy object applies. The array of
// ingress rules is applied to any pods selected by this field. Multiple network
// policies can select the same set of pods. In this case, the ingress rules for
// each are combined additively. This field is NOT optional and follows standard
// label selector semantics. An empty podSelector matches all pods in this
// namespace.
// podSelector selects the pods to which this NetworkPolicy object applies.
// The array of ingress rules is applied to any pods selected by this field.
// Multiple network policies can select the same set of pods. In this case,
// the ingress rules for each are combined additively.
// This field is NOT optional and follows standard label selector semantics.
// An empty podSelector matches all pods in this namespace.
PodSelector metav1.LabelSelector
// List of ingress rules to be applied to the selected pods. Traffic is allowed to
// a pod if there are no NetworkPolicies selecting the pod
// ingress is a list of ingress rules to be applied to the selected pods.
// Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
// (and cluster policy otherwise allows the traffic), OR if the traffic source is
// the pod's local node, OR if the traffic matches at least one ingress rule
// across all of the NetworkPolicy objects whose podSelector matches the pod. If
@ -71,8 +73,8 @@ type NetworkPolicySpec struct {
// +optional
Ingress []NetworkPolicyIngressRule
// List of egress rules to be applied to the selected pods. Outgoing traffic is
// allowed if there are no NetworkPolicies selecting the pod (and cluster policy
// egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
// is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
// otherwise allows the traffic), OR if the traffic matches at least one egress rule
// across all of the NetworkPolicy objects whose podSelector matches the pod. If
// this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
@ -81,15 +83,15 @@ type NetworkPolicySpec struct {
// +optional
Egress []NetworkPolicyEgressRule
// List of rule types that the NetworkPolicy relates to.
// policyTypes is a list of rule types that the NetworkPolicy relates to.
// Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
// If this field is not specified, it will default based on the existence of Ingress or Egress rules;
// policies that contain an Egress section are assumed to affect Egress, and all policies
// (whether or not they contain an Ingress section) are assumed to affect Ingress.
// If this field is not specified, it will default based on the existence of ingress or egress rules;
// policies that contain an egress section are assumed to affect egress, and all policies
// (whether or not they contain an ingress section) are assumed to affect ingress.
// If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
// Likewise, if you want to write a policy that specifies that no egress is allowed,
// you must specify a policyTypes value that include "Egress" (since such a policy would not include
// an Egress section and would otherwise default to just [ "Ingress" ]).
// an egress section and would otherwise default to just [ "Ingress" ]).
// This field is beta-level in 1.8
// +optional
PolicyTypes []PolicyType
@ -98,15 +100,15 @@ type NetworkPolicySpec struct {
// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
type NetworkPolicyIngressRule struct {
// List of ports which should be made accessible on the pods selected for this
// rule. Each item in this list is combined using a logical OR. If this field is
// ports is a list of ports which should be made accessible on the pods selected for
// this rule. Each item in this list is combined using a logical OR. If this field is
// empty or missing, this rule matches all ports (traffic not restricted by port).
// If this field is present and contains at least one item, then this rule allows
// traffic only if the traffic matches at least one port in the list.
// +optional
Ports []NetworkPolicyPort
// List of sources which should be able to access the pods selected for this rule.
// from is a list of sources which should be able to access the pods selected for this rule.
// Items in this list are combined using a logical OR operation. If this field is
// empty or missing, this rule matches all sources (traffic not restricted by
// source). If this field is present and contains at least one item, this rule
@ -119,7 +121,7 @@ type NetworkPolicyIngressRule struct {
// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
// This type is beta-level in 1.8
type NetworkPolicyEgressRule struct {
// List of destination ports for outgoing traffic.
// ports is a list of destination ports for outgoing traffic.
// Each item in this list is combined using a logical OR. If this field is
// empty or missing, this rule matches all ports (traffic not restricted by port).
// If this field is present and contains at least one item, then this rule allows
@ -127,7 +129,7 @@ type NetworkPolicyEgressRule struct {
// +optional
Ports []NetworkPolicyPort
// List of destinations for outgoing traffic of pods selected for this rule.
// to is a list of destinations for outgoing traffic of pods selected for this rule.
// Items in this list are combined using a logical OR operation. If this field is
// empty or missing, this rule matches all destinations (traffic not restricted by
// destination). If this field is present and contains at least one item, this rule
@ -138,19 +140,19 @@ type NetworkPolicyEgressRule struct {
// NetworkPolicyPort describes a port to allow traffic on
type NetworkPolicyPort struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
// If not specified, this field defaults to TCP.
// +optional
Protocol *api.Protocol
// The port on the given protocol. This can either be a numerical or named
// port represents the port on the given protocol. This can either be a numerical or named
// port on a pod. If this field is not provided, this matches all port names and
// numbers.
// If present, only traffic on the specified protocol AND port will be matched.
// +optional
Port *intstr.IntOrString
// If set, indicates that the range of ports from port to endPort, inclusive,
// endPort, if set, indicates that the range of ports from port to endPort, inclusive,
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal or greater than port.
@ -162,37 +164,38 @@ type NetworkPolicyPort struct {
// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
// that should not be included within this rule.
type IPBlock struct {
// CIDR is a string representing the IP Block
// cidr is a string representing the IPBlock
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
CIDR string
// Except is a slice of CIDRs that should not be included within an IP Block
// except is a list of CIDRs that should not be included within the IPBlock
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
// Except values will be rejected if they are outside the CIDR range
// Except values will be rejected if they are outside the cidr range
// +optional
Except []string
}
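As a quick reference for the cidr/except semantics described above, an ipBlock peer built with the external k8s.io/api/networking/v1 types might look like:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	peer := networkingv1.NetworkPolicyPeer{
		IPBlock: &networkingv1.IPBlock{
			CIDR:   "192.168.1.0/24",
			Except: []string{"192.168.1.8/32"}, // carve-out must fall inside the cidr range
		},
	}
	fmt.Println(peer.IPBlock.CIDR, peer.IPBlock.Except)
}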
// NetworkPolicyPeer describes a peer to allow traffic to/from.
type NetworkPolicyPeer struct {
// This is a label selector which selects Pods. This field follows standard label
// podSelector is a label selector which selects pods. This field follows standard label
// selector semantics; if present but empty, it selects all pods.
//
// If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
// Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
// If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
// the pods matching podSelector in the Namespaces selected by namespaceSelector.
// Otherwise it selects the pods matching podSelector in the policy's own namespace.
// +optional
PodSelector *metav1.LabelSelector
// Selects Namespaces using cluster-scoped labels. This field follows standard label
// selector semantics; if present but empty, it selects all namespaces.
// namespaceSelector selects namespaces using cluster-scoped labels. This field follows
// standard label selector semantics; if present but empty, it selects all namespaces.
//
// If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
// Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
// If podSelector is also set, then the NetworkPolicyPeer as a whole selects
// the pods matching podSelector in the namespaces selected by namespaceSelector.
// Otherwise it selects all pods in the namespaces selected by namespaceSelector.
// +optional
NamespaceSelector *metav1.LabelSelector
// IPBlock defines policy on a particular IPBlock. If this field is set then
// ipBlock defines policy on a particular IPBlock. If this field is set then
// neither of the other fields can be.
// +optional
IPBlock *IPBlock
@ -228,9 +231,9 @@ const (
NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported"
)
// NetworkPolicyStatus describe the current state of the NetworkPolicy.
// NetworkPolicyStatus describes the current state of the NetworkPolicy.
type NetworkPolicyStatus struct {
// Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
// conditions holds an array of metav1.Condition that describes the state of the NetworkPolicy.
Conditions []metav1.Condition
}
@ -239,6 +242,7 @@ type NetworkPolicyStatus struct {
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
@ -253,17 +257,18 @@ type NetworkPolicyList struct {
// based virtual hosting etc.
type Ingress struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Spec is the desired state of the Ingress.
// spec is the desired state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec IngressSpec
// Status is the current state of the Ingress.
// status is the current state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status IngressStatus
@ -274,18 +279,19 @@ type Ingress struct {
// IngressList is a collection of Ingress.
type IngressList struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// Items is the list of Ingress.
// items is the list of Ingress.
Items []Ingress
}
// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpec struct {
// IngressClassName is the name of the IngressClass cluster resource. The
// ingressClassName is the name of the IngressClass cluster resource. The
// associated IngressClass defines which controller will implement the
// resource. This replaces the deprecated `kubernetes.io/ingress.class`
// annotation. For backwards compatibility, when that annotation is set, it
@ -298,23 +304,23 @@ type IngressSpec struct {
// +optional
IngressClassName *string
// DefaultBackend is the backend that should handle requests that don't
// defaultBackend is the backend that should handle requests that don't
// match any rule. If Rules are not specified, DefaultBackend must be specified.
// If DefaultBackend is not set, the handling of requests that do not match any
// of the rules will be up to the Ingress controller.
// +optional
DefaultBackend *IngressBackend
// TLS configuration. Currently the Ingress only supports a single TLS
// port, 443. If multiple members of this list specify different hosts, they
// will be multiplexed on the same port according to the hostname specified
// tls represents the TLS configuration. Currently the ingress only supports a
// single TLS port, 443. If multiple members of this list specify different hosts,
// they will be multiplexed on the same port according to the hostname specified
// through the SNI TLS extension, if the ingress controller fulfilling the
// ingress supports SNI.
// +listType=atomic
// +optional
TLS []IngressTLS
// A list of host rules used to configure the Ingress. If unspecified, or
// rules is a list of host rules used to configure the Ingress. If unspecified, or
// no rule matches, all traffic is sent to the default backend.
// +listType=atomic
// +optional
@ -330,9 +336,10 @@ type IngressSpec struct {
// resources without a class specified will be assigned this default class.
type IngressClass struct {
metav1.TypeMeta
metav1.ObjectMeta
// Spec is the desired state of the IngressClass.
// spec is the desired state of the IngressClass.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec IngressClassSpec
@ -340,15 +347,15 @@ type IngressClass struct {
// IngressClassSpec provides information about the class of an Ingress.
type IngressClassSpec struct {
// Controller refers to the name of the controller that should handle this
// controller refers to the name of the controller that should handle this
// class. This allows for different "flavors" that are controlled by the
// same controller. For example, you may have different Parameters for the
// same controller. For example, you may have different parameters for the
// same implementing controller. This should be specified as a
// domain-prefixed path no more than 250 characters in length, e.g.
// "acme.io/ingress-controller". This field is immutable.
Controller string
// Parameters is a link to a custom resource containing additional
// parameters is a link to a custom resource containing additional
// configuration for the controller. This is optional if the controller does
// not require extra parameters.
// +optional
@ -367,20 +374,24 @@ const (
// IngressClassParametersReference identifies an API object. This can be used
// to specify a cluster or namespace-scoped resource.
type IngressClassParametersReference struct {
// APIGroup is the group for the resource being referenced. If APIGroup is
// not specified, the specified Kind must be in the core API group. For any
// other third-party types, APIGroup is required.
// apiGroup is the group for the resource being referenced. If apiGroup is
// not specified, the specified kind must be in the core API group. For any
// other third-party types, apiGroup is required.
// +optional
APIGroup *string
// Kind is the type of resource being referenced.
// kind is the type of resource being referenced.
Kind string
// Name is the name of resource being referenced.
// name is the name of resource being referenced.
Name string
// Scope represents if this refers to a cluster or namespace scoped resource.
// scope represents if this refers to a cluster or namespace scoped resource.
// This may be set to "Cluster" (default) or "Namespace".
// +optional
Scope *string
// Namespace is the namespace of the resource being referenced. This field is
// namespace is the namespace of the resource being referenced. This field is
// required when scope is set to "Namespace" and must be unset when scope is set to
// "Cluster".
// +optional
@ -392,71 +403,73 @@ type IngressClassParametersReference struct {
// IngressClassList is a collection of IngressClasses.
type IngressClassList struct {
metav1.TypeMeta
// Standard object's metadata.
// +optional
metav1.ListMeta
// Items is the list of IngressClasses.
// items is the list of IngressClasses.
Items []IngressClass
}
// IngressTLS describes the transport layer security associated with an Ingress.
// IngressTLS describes the transport layer security associated with an ingress.
type IngressTLS struct {
// Hosts are a list of hosts included in the TLS certificate. The values in
// hosts is a list of hosts included in the TLS certificate. The values in
// this list must match the name/s used in the tlsSecret. Defaults to the
// wildcard host setting for the loadbalancer controller fulfilling this
// Ingress, if left unspecified.
// +listType=atomic
// +optional
Hosts []string
// SecretName is the name of the secret used to terminate TLS traffic on
// secretName is the name of the secret used to terminate TLS traffic on
// port 443. Field is left optional to allow TLS routing based on SNI
// hostname alone. If the SNI host in a listener conflicts with the "Host"
// header field used by an IngressRule, the SNI host is used for termination
// and the value of the Host header is used for routing.
// and the value of the "Host" header is used for routing.
// +optional
SecretName string
// TODO: Consider specifying different modes of termination, protocols etc.
}
// IngressStatus describe the current state of the Ingress.
// IngressStatus describes the current state of the Ingress.
type IngressStatus struct {
// LoadBalancer contains the current status of the load-balancer.
// loadBalancer contains the current status of the load-balancer.
// +optional
LoadBalancer IngressLoadBalancerStatus
}
// IngressLoadBalancerStatus represents the status of a load-balancer
type IngressLoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer.
// ingress is a list containing ingress points for the load-balancer.
// +optional
Ingress []IngressLoadBalancerIngress
}
// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based.
// ip is set for load-balancer ingress points that are IP based.
// +optional
IP string
// Hostname is set for load-balancer ingress points that are DNS based.
// hostname is set for load-balancer ingress points that are DNS based.
// +optional
Hostname string
// Ports provides information about the ports exposed by this LoadBalancer.
// ports provides information about the ports exposed by this LoadBalancer.
// +optional
Ports []IngressPortStatus
}
// IngressPortStatus represents the error condition of an ingress port
type IngressPortStatus struct {
// Port is the port number of the ingress port.
// port is the port number of the ingress port.
Port int32
// Protocol is the protocol of the ingress port.
// protocol is the protocol of the ingress port.
Protocol api.Protocol
// Error indicates a problem on this port.
// error indicates a problem on this port.
// The format of the error must comply with the following rules:
// - Kubernetes-defined error values use CamelCase names
// - Provider-specific error values must follow label-name style (e.g.
@ -469,7 +482,7 @@ type IngressPortStatus struct {
// host match, then routed to the backend associated with the matching
// IngressRuleValue.
type IngressRule struct {
// Host is the fully qualified domain name of a network host, as defined by RFC 3986.
// host is the fully qualified domain name of a network host, as defined by RFC 3986.
// Note the following deviations from the "host" part of the
// URI as defined in RFC 3986:
// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
@ -482,17 +495,18 @@ type IngressRule struct {
// IngressRuleValue. If the host is unspecified, the Ingress routes all
// traffic based on the specified IngressRuleValue.
//
// Host can be "precise" which is a domain name without the terminating dot of
// host can be "precise" which is a domain name without the terminating dot of
// a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
// prefixed with a single wildcard label (e.g. "*.foo.com").
// The wildcard character '*' must appear by itself as the first DNS label and
// matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
// Requests will be matched against the Host field in the following way:
// 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
// 2. If Host is a wildcard, then the request matches this rule if the http host header
// Requests will be matched against the host field in the following way:
// 1. If host is precise, the request matches this rule if the http host header is equal to Host.
// 2. If host is a wildcard, then the request matches this rule if the http host header
// is equal to the suffix (removing the first label) of the wildcard rule.
// +optional
Host string
// IngressRuleValue represents a rule to route requests for this
// IngressRule. If unspecified, the rule defaults to a http catch-all.
// Whether that sends just traffic matching the host to the default backend
@ -524,7 +538,7 @@ type IngressRuleValue struct {
// to match against everything after the last '/' and before the first '?'
// or '#'.
type HTTPIngressRuleValue struct {
// A collection of paths that map requests to backends.
// paths is a collection of paths that map requests to backends.
// +listType=atomic
Paths []HTTPIngressPath
// TODO: Consider adding fields for ingress-type specific global
@ -564,32 +578,32 @@ const (
// HTTPIngressPath associates a path with a backend. Incoming urls matching the
// path are forwarded to the backend.
type HTTPIngressPath struct {
// Path is matched against the path of an incoming request. Currently it can
// path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
// as defined by RFC 3986. Paths must begin with a '/' and must be present
// when using PathType with value "Exact" or "Prefix".
// +optional
Path string
// PathType determines the interpretation of the Path matching. PathType can
// pathType determines the interpretation of the path matching. PathType can
// be one of Exact, Prefix, or ImplementationSpecific. Implementations are
// required to support all path types.
// +optional
PathType *PathType
// Backend defines the referenced service endpoint to which the traffic
// backend defines the referenced service endpoint to which the traffic
// will be forwarded to.
Backend IngressBackend
}
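// A minimal sketch (not part of this package) of how the two portable path
// types could be evaluated, assuming only the standard library "strings"
// package; the helper name pathMatches is illustrative, and
// PathTypeExact/PathTypePrefix refer to the constants declared earlier in
// this file:
//
//	func pathMatches(pathType PathType, rulePath, requestPath string) bool {
//		switch pathType {
//		case PathTypeExact:
//			return requestPath == rulePath
//		case PathTypePrefix:
//			prefix := strings.TrimSuffix(rulePath, "/")
//			return requestPath == prefix || strings.HasPrefix(requestPath, prefix+"/")
//		default:
//			// ImplementationSpecific is left to the controller.
//			return false
//		}
//	}
//
// For example, a rule path of "/foo" with PathTypePrefix matches "/foo" and
// "/foo/bar" but not "/foobar".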
// IngressBackend describes all endpoints for a given service and port.
type IngressBackend struct {
// Service references a Service as a Backend.
// service references a service as a backend.
// This is a mutually exclusive setting with "Resource".
// +optional
Service *IngressServiceBackend
// Resource is an ObjectRef to another Kubernetes resource in the namespace
// resource is an ObjectRef to another Kubernetes resource in the namespace
// of the Ingress object. If resource is specified, a service.Name and
// service.Port must not be specified.
// This is a mutually exclusive setting with "Service".
@ -599,24 +613,24 @@ type IngressBackend struct {
// IngressServiceBackend references a Kubernetes Service as a Backend.
type IngressServiceBackend struct {
// Name is the referenced service. The service must exist in
// name is the referenced service. The service must exist in
// the same namespace as the Ingress object.
Name string
// Port of the referenced service. A port name or port number
// port of the referenced service. A port name or port number
// is required for an IngressServiceBackend.
Port ServiceBackendPort
}
// ServiceBackendPort is the service port being referenced.
type ServiceBackendPort struct {
// Name is the name of the port on the Service.
// name is the name of the port on the Service.
// This must be an IANA_SVC_NAME (following RFC6335).
// This is a mutually exclusive setting with "Number".
// +optional
Name string
// Number is the numerical port number (e.g. 80) on the Service.
// number is the numerical port number (e.g. 80) on the Service.
// This is a mutually exclusive setting with "Name".
// +optional
Number int32
@ -637,6 +651,7 @@ type ServiceBackendPort struct {
// selector matches the Node may be used.
type ClusterCIDR struct {
metav1.TypeMeta
metav1.ObjectMeta
Spec ClusterCIDRSpec
@ -644,13 +659,13 @@ type ClusterCIDR struct {
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
type ClusterCIDRSpec struct {
// NodeSelector defines which nodes the config is applicable to.
// An empty or nil NodeSelector selects all nodes.
// nodeSelector defines which nodes the config is applicable to.
// An empty or nil nodeSelector selects all nodes.
// This field is immutable.
// +optional
NodeSelector *api.NodeSelector
// PerNodeHostBits defines the number of host bits to be configured per node.
// perNodeHostBits defines the number of host bits to be configured per node.
// A subnet mask determines how much of the address is used for network bits
// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
// address into 24 bits for the network portion and 8 bits for the host portion.
@ -660,14 +675,14 @@ type ClusterCIDRSpec struct {
// +required
PerNodeHostBits int32
// IPv4 defines an IPv4 IP block in CIDR notation (e.g. "10.0.0.0/8").
// At least one of IPv4 and IPv6 must be specified.
// ipv4 defines an IPv4 IP block in CIDR notation (e.g. "10.0.0.0/8").
// At least one of ipv4 and ipv6 must be specified.
// This field is immutable.
// +optional
IPv4 string
// IPv6 defines an IPv6 IP block in CIDR notation (e.g. "2001:db8::/64").
// At least one of IPv4 and IPv6 must be specified.
// ipv6 defines an IPv6 IP block in CIDR notation (e.g. "2001:db8::/64").
// At least one of ipv4 and ipv6 must be specified.
// This field is immutable.
// +optional
IPv6 string
@ -682,6 +697,58 @@ type ClusterCIDRList struct {
// +optional
metav1.ListMeta
// Items is the list of ClusterCIDRs.
// items is the list of ClusterCIDRs.
Items []ClusterCIDR
}
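// A short sketch of the arithmetic implied by PerNodeHostBits in
// ClusterCIDRSpec above: each node receives a per-node CIDR whose mask size
// is the address width minus perNodeHostBits, i.e. 2^perNodeHostBits
// addresses per node. The helper name perNodeMaskSize is illustrative only.
//
//	func perNodeMaskSize(addressBits, perNodeHostBits int32) int32 {
//		// addressBits is 32 for IPv4 and 128 for IPv6.
//		return addressBits - perNodeHostBits
//	}
//
// For example, perNodeMaskSize(32, 8) == 24, so each node is handed a /24
// containing 256 addresses.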
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
// An IP address can be represented in different formats; to guarantee the uniqueness of the IP,
// the name of the object is the IP address in canonical format: four decimal octets separated
// by dots with no leading zeros for IPv4, and the representation defined by RFC 5952 for IPv6.
// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
type IPAddress struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// +optional
Spec IPAddressSpec
}
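// A minimal sketch (not part of this package) of deriving the canonical
// object name described above, assuming only the standard library
// "net/netip" package, which rejects IPv4 octets with leading zeros and
// prints IPv6 in the RFC 5952 form:
//
//	func canonicalIPAddressName(raw string) (string, error) {
//		addr, err := netip.ParseAddr(raw)
//		if err != nil {
//			return "", err
//		}
//		return addr.String(), nil
//	}
//
// For example, canonicalIPAddressName("2001:db8:0:0:0:0:0:1") returns "2001:db8::1".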
// IPAddressSpec describes the attributes of an IPAddress.
type IPAddressSpec struct {
// ParentRef references the resource that an IPAddress is attached to.
// An IPAddress must reference a parent object.
// +required
ParentRef *ParentReference
}
// ParentReference describes a reference to a parent object.
type ParentReference struct {
// Group is the group of the object being referenced.
Group string
// Resource is the resource of the object being referenced.
Resource string
// Namespace is the namespace of the object being referenced.
Namespace string
// Name is the name of the object being referenced.
Name string
// UID is the uid of the object being referenced.
// +optional
UID types.UID
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddressList contains a list of IPAddress.
type IPAddressList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// Items is the list of IPAddresses.
Items []IPAddress
}

View File

@ -154,6 +154,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddress) DeepCopyInto(out *IPAddress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
func (in *IPAddress) DeepCopy() *IPAddress {
if in == nil {
return nil
}
out := new(IPAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IPAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
func (in *IPAddressList) DeepCopy() *IPAddressList {
if in == nil {
return nil
}
out := new(IPAddressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
*out = *in
if in.ParentRef != nil {
in, out := &in.ParentRef, &out.ParentRef
*out = new(ParentReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
if in == nil {
return nil
}
out := new(IPAddressSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
@ -816,6 +897,22 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParentReference) DeepCopyInto(out *ParentReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
func (in *ParentReference) DeepCopy() *ParentReference {
if in == nil {
return nil
}
out := new(ParentReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in

View File

@ -1,11 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
# approval on api packages bubbles to api-approvers
reviewers:
- sig-apps-api-reviewers
- sig-apps-api-approvers
- sig-auth-policy-approvers
- sig-auth-policy-reviewers
labels:
- sig/auth
- sig/apps

View File

@ -1,19 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package policy // import "k8s.io/kubernetes/pkg/apis/policy"

View File

@ -1,51 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
PDBV1beta1Label = "pdb.kubernetes.io/deprecated-v1beta1-empty-selector-match"
)
var (
NonV1beta1MatchAllSelector = &metav1.LabelSelector{}
NonV1beta1MatchNoneSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpExists}},
}
V1beta1MatchNoneSelector = &metav1.LabelSelector{}
V1beta1MatchAllSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpDoesNotExist}},
}
)
func StripPDBV1beta1Label(selector *metav1.LabelSelector) {
if selector == nil {
return
}
trimmedMatchExpressions := selector.MatchExpressions[:0]
for _, exp := range selector.MatchExpressions {
if exp.Key != PDBV1beta1Label {
trimmedMatchExpressions = append(trimmedMatchExpressions, exp)
}
}
selector.MatchExpressions = trimmedMatchExpressions
}
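// A small usage sketch for StripPDBV1beta1Label; the selector below is
// hypothetical:
//
//	sel := &metav1.LabelSelector{
//		MatchLabels: map[string]string{"app": "web"},
//		MatchExpressions: []metav1.LabelSelectorRequirement{
//			{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpExists},
//		},
//	}
//	StripPDBV1beta1Label(sel)
//	// sel.MatchExpressions is now empty; MatchLabels is left untouched.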

View File

@ -1,58 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "policy"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&PodDisruptionBudget{},
&PodDisruptionBudgetList{},
&PodSecurityPolicy{},
&PodSecurityPolicyList{},
&Eviction{},
)
return nil
}
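// A minimal sketch of how a caller typically wires this group into a
// scheme; the surrounding program is assumed, not shown here:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}
//	// PodDisruptionBudget, Eviction and the other types above are now
//	// registered under the "policy" group at the internal version.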

View File

@ -1,529 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
type PodDisruptionBudgetSpec struct {
// An eviction is allowed if at least "minAvailable" pods selected by
// "selector" will still be available after the eviction, i.e. even in the
// absence of the evicted pod. So for example you can prevent all voluntary
// evictions by specifying "100%".
// +optional
MinAvailable *intstr.IntOrString
// Label query over pods whose evictions are managed by the disruption
// budget.
// +optional
Selector *metav1.LabelSelector
// An eviction is allowed if at most "maxUnavailable" pods selected by
// "selector" are unavailable after the eviction, i.e. even in absence of
// the evicted pod. For example, one can prevent all voluntary evictions
// by specifying 0. This is a mutually exclusive setting with "minAvailable".
// +optional
MaxUnavailable *intstr.IntOrString
// UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
// should be considered for eviction. Current implementation considers healthy pods,
// as pods that have status.conditions item with type="Ready",status="True".
//
// Valid policies are IfHealthyBudget and AlwaysAllow.
// If no policy is specified, the default behavior will be used,
// which corresponds to the IfHealthyBudget policy.
//
// IfHealthyBudget policy means that running pods (status.phase="Running"),
// but not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
//
// AlwaysAllow policy means that all running pods (status.phase="Running"),
// but not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB are met. This means prospective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
//
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
//
// This field is alpha-level. The eviction API uses this field when
// the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).
// +optional
UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType
}
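// A brief sketch of building a spec with the fields above; the selector and
// the "100%" value are illustrative. MinAvailable and MaxUnavailable are
// mutually exclusive, so only one of them is set:
//
//	minAvailable := intstr.FromString("100%")
//	spec := PodDisruptionBudgetSpec{
//		MinAvailable: &minAvailable,
//		Selector: &metav1.LabelSelector{
//			MatchLabels: map[string]string{"app": "web"},
//		},
//	}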
// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods
// should be considered for eviction.
// +enum
type UnhealthyPodEvictionPolicyType string
const (
// IfHealthyBudget policy means that running pods (status.phase="Running"),
// but not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget"
// AlwaysAllow policy means that all running pods (status.phase="Running"),
// but not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB are met. This means prospective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow"
)
// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {
// Most recent generation observed when updating this PDB status. DisruptionsAllowed and other
// status information is valid only if observedGeneration equals the PDB's object generation.
// +optional
ObservedGeneration int64
// DisruptedPods contains information about pods whose eviction was
// processed by the API server eviction subresource handler but has not
// yet been observed by the PodDisruptionBudget controller.
// A pod will be in this map from the time when the API server processed the
// eviction request to the time when the pod is seen by the PDB controller
// as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
// and the value is the time when the API server processed the eviction request. If
// the deletion didn't occur and a pod is still there, it will be removed from
// the list automatically by the PodDisruptionBudget controller after some time.
// If everything goes smoothly, this map should be empty most of the time.
// A large number of entries in the map may indicate problems with pod deletions.
// +optional
DisruptedPods map[string]metav1.Time
// Number of pod disruptions that are currently allowed.
DisruptionsAllowed int32
// current number of healthy pods
CurrentHealthy int32
// minimum desired number of healthy pods
DesiredHealthy int32
// total number of pods counted by this disruption budget
ExpectedPods int32
// Conditions contain conditions for PDB
// +optional
Conditions []metav1.Condition
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
type PodDisruptionBudget struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the PodDisruptionBudget.
// +optional
Spec PodDisruptionBudgetSpec
// Most recently observed status of the PodDisruptionBudget.
// +optional
Status PodDisruptionBudgetStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
type PodDisruptionBudgetList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []PodDisruptionBudget
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Eviction evicts a pod from its node subject to certain policies and safety constraints.
// This is a subresource of Pod. A request to cause such an eviction is
// created by POSTing to .../pods/<pod name>/eviction.
type Eviction struct {
metav1.TypeMeta
// ObjectMeta describes the pod that is being evicted.
// +optional
metav1.ObjectMeta
// DeleteOptions may be provided
// +optional
DeleteOptions *metav1.DeleteOptions
}
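// A short sketch of constructing an Eviction for the request described
// above; the pod name, namespace and grace period are hypothetical, and the
// POST itself would be issued by a client against the pod's "eviction"
// subresource:
//
//	gracePeriod := int64(30)
//	eviction := Eviction{
//		ObjectMeta:    metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
//		DeleteOptions: &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod},
//	}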
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext
// that will be applied to a pod and container.
type PodSecurityPolicy struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Spec defines the policy enforced.
// +optional
Spec PodSecurityPolicySpec
}
// PodSecurityPolicySpec defines the policy enforced.
type PodSecurityPolicySpec struct {
// Privileged determines if a pod can request to be run as privileged.
// +optional
Privileged bool
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capability in both
// DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly
// allowed, and need not be included in the AllowedCapabilities list.
// +optional
DefaultAddCapabilities []api.Capability
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
// are required to be dropped and cannot be added.
// +optional
RequiredDropCapabilities []api.Capability
// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
// Capabilities in this field may be added at the pod author's discretion.
// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
// To allow all capabilities you may use '*'.
// +optional
AllowedCapabilities []api.Capability
// Volumes is an allowlist of volume plugins. Empty indicates that
// no volumes may be used. To allow all volumes you may use '*'.
// +optional
Volumes []FSType
// HostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
// +optional
HostNetwork bool
// HostPorts determines which host port ranges are allowed to be exposed.
// +optional
HostPorts []HostPortRange
// HostPID determines if the policy allows the use of HostPID in the pod spec.
// +optional
HostPID bool
// HostIPC determines if the policy allows the use of HostIPC in the pod spec.
// +optional
HostIPC bool
// SELinux is the strategy that will dictate the allowable labels that may be set.
SELinux SELinuxStrategyOptions
// RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
RunAsUser RunAsUserStrategyOptions
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
// RunAsGroup feature gate to be enabled.
RunAsGroup *RunAsGroupStrategyOptions
// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
SupplementalGroups SupplementalGroupsStrategyOptions
// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
FSGroup FSGroupStrategyOptions
// ReadOnlyRootFilesystem, when set to true, forces containers to run with a read-only root
// filesystem. If a container specifically requests to run with a non-read-only root
// filesystem, the PSP should deny the pod.
// If set to false, the container may still run with a read-only root filesystem if it wishes,
// but it will not be forced to.
// +optional
ReadOnlyRootFilesystem bool
// DefaultAllowPrivilegeEscalation controls the default setting for whether a
// process can gain more privileges than its parent process.
// +optional
DefaultAllowPrivilegeEscalation *bool
// AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. If unspecified, defaults to true.
// +optional
AllowPrivilegeEscalation bool
// AllowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.
// +optional
AllowedHostPaths []AllowedHostPath
// AllowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
AllowedFlexVolumes []AllowedFlexVolume
// AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
// An empty value indicates that any CSI driver can be used for inline ephemeral volumes.
// +optional
AllowedCSIDrivers []AllowedCSIDriver
// AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
// Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.
//
// Examples:
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
// +optional
AllowedUnsafeSysctls []string
// ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
//
// Examples:
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
// +optional
ForbiddenSysctls []string
// AllowedProcMountTypes is an allowlist of ProcMountTypes.
// Empty or nil indicates that only the DefaultProcMountType may be used.
// +optional
AllowedProcMountTypes []api.ProcMountType
// runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod.
// If this field is omitted, the pod's runtimeClassName field is unrestricted.
// Enforcement of this field depends on the RuntimeClass feature gate being enabled.
// +optional
RuntimeClass *RuntimeClassStrategyOptions
}
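// A minimal sketch of the wildcard form used by AllowedUnsafeSysctls and
// ForbiddenSysctls above, assuming only the standard library "strings"
// package; the helper name sysctlMatchesPattern is illustrative:
//
//	func sysctlMatchesPattern(name, pattern string) bool {
//		if strings.HasSuffix(pattern, "*") {
//			return strings.HasPrefix(name, strings.TrimSuffix(pattern, "*"))
//		}
//		return name == pattern
//	}
//
// For example, sysctlMatchesPattern("foo.bar", "foo.*") is true, while
// sysctlMatchesPattern("foo.bar", "foo") is false; a single "*" matches everything.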
// AllowedHostPath defines the host volume conditions that will be enabled by a policy
// for pods to use. It requires the path prefix to be defined.
type AllowedHostPath struct {
// PathPrefix is the path prefix that the host volume must match.
// PathPrefix does not support `*`.
// Trailing slashes are trimmed when validating the path prefix with a host path.
//
// Examples:
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
// `/foo` would not allow `/food` or `/etc/foo`
PathPrefix string
// ReadOnly, when set to true, allows host volumes matching the pathPrefix only if all volume mounts are readOnly.
ReadOnly bool
}
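// A small sketch of the prefix check described above, assuming only the
// standard library "strings" package; hostPathAllowed is an illustrative
// name, not part of this package:
//
//	func hostPathAllowed(pathPrefix, hostPath string) bool {
//		prefix := strings.TrimSuffix(pathPrefix, "/")
//		path := strings.TrimSuffix(hostPath, "/")
//		return path == prefix || strings.HasPrefix(path, prefix+"/")
//	}
//
// hostPathAllowed("/foo", "/foo/bar") is true, while
// hostPathAllowed("/foo", "/food") and hostPathAllowed("/foo", "/etc/foo")
// are false.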
// HostPortRange defines a range of host ports that will be enabled by a policy
// for pods to use. It requires both the start and end to be defined.
type HostPortRange struct {
// Min is the start of the range, inclusive.
Min int32
// Max is the end of the range, inclusive.
Max int32
}
// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowedCapabilities
// field and means that any capabilities are allowed to be requested.
var AllowAllCapabilities api.Capability = "*"
// FSType gives strong typing to different file systems that are used by volumes.
type FSType string
// Exported FSTypes.
const (
AzureFile FSType = "azureFile"
Flocker FSType = "flocker"
FlexVolume FSType = "flexVolume"
HostPath FSType = "hostPath"
EmptyDir FSType = "emptyDir"
GCEPersistentDisk FSType = "gcePersistentDisk"
AWSElasticBlockStore FSType = "awsElasticBlockStore"
GitRepo FSType = "gitRepo"
Secret FSType = "secret"
NFS FSType = "nfs"
ISCSI FSType = "iscsi"
Glusterfs FSType = "glusterfs"
PersistentVolumeClaim FSType = "persistentVolumeClaim"
RBD FSType = "rbd"
Cinder FSType = "cinder"
CephFS FSType = "cephFS"
DownwardAPI FSType = "downwardAPI"
FC FSType = "fc"
ConfigMap FSType = "configMap"
VsphereVolume FSType = "vsphereVolume"
Quobyte FSType = "quobyte"
AzureDisk FSType = "azureDisk"
PhotonPersistentDisk FSType = "photonPersistentDisk"
StorageOS FSType = "storageos"
Projected FSType = "projected"
PortworxVolume FSType = "portworxVolume"
ScaleIO FSType = "scaleIO"
CSI FSType = "csi"
Ephemeral FSType = "ephemeral"
All FSType = "*"
)
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type AllowedFlexVolume struct {
// Driver is the name of the Flexvolume driver.
Driver string
}
// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.
type AllowedCSIDriver struct {
// Name is the registered name of the CSI driver
Name string
}
// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
type SELinuxStrategyOptions struct {
// Rule is the strategy that will dictate the allowable labels that may be set.
Rule SELinuxStrategy
// SELinuxOptions required to run as; required for MustRunAs
// More info: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#selinux
// +optional
SELinuxOptions *api.SELinuxOptions
}
// SELinuxStrategy denotes strategy types for generating SELinux options for a
// SecurityContext.
type SELinuxStrategy string
const (
// SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied.
SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
// SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels.
SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
)
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsUserStrategyOptions struct {
// Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
Rule RunAsUserStrategy
// Ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
// then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsGroupStrategyOptions struct {
// Rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
Rule RunAsGroupStrategy
// Ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
// then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// IDRange provides a min/max of an allowed range of IDs.
type IDRange struct {
// Min is the start of the range, inclusive.
Min int64
// Max is the end of the range, inclusive.
Max int64
}
// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
// SecurityContext.
type RunAsUserStrategy string
const (
// RunAsUserStrategyMustRunAs means that container must run as a particular uid.
RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
// RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid
RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
// RunAsUserStrategyRunAsAny means that container may make requests for any uid.
RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
)
// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
// SecurityContext.
type RunAsGroupStrategy string
const (
// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
// However, when RunAsGroup are specified, they have to fall in the defined range.
RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
)
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
type FSGroupStrategyOptions struct {
// Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
// +optional
Rule FSGroupStrategyType
// Ranges are the allowed ranges of fs groups. If you would like to force a single
// fs group then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
// SecurityContext
type FSGroupStrategyType string
const (
// FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied.
// However, when FSGroups are specified, they have to fall in the defined range.
FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs"
// FSGroupStrategyMustRunAs means that container must have FSGroup of X applied.
FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
// FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.
FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
)
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
type SupplementalGroupsStrategyOptions struct {
// Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.
// +optional
Rule SupplementalGroupsStrategyType
// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
// supplemental group then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
// groups for a SecurityContext.
type SupplementalGroupsStrategyType string
const (
// SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid.
// However, when gids are specified, they have to fall in the defined range.
SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs"
// SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
// SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
)
// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses
// for a pod.
type RuntimeClassStrategyOptions struct {
// allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod.
// A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the
// list. An empty list requires the RuntimeClassName field to be unset.
AllowedRuntimeClassNames []string
// defaultRuntimeClassName is the default RuntimeClassName to set on the pod.
// The default MUST be allowed by the allowedRuntimeClassNames list.
// A value of nil does not mutate the Pod.
// +optional
DefaultRuntimeClassName *string
}
// AllowAllRuntimeClassNames can be used as a value for the
// RuntimeClassStrategyOptions.allowedRuntimeClassNames field and means that any runtimeClassName is
// allowed.
const AllowAllRuntimeClassNames = "*"
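// A minimal sketch of how the allowlist above could be consulted, ignoring
// defaulting and the unset case; runtimeClassNameAllowed is an illustrative
// name, not part of this package:
//
//	func runtimeClassNameAllowed(opts RuntimeClassStrategyOptions, name string) bool {
//		for _, allowed := range opts.AllowedRuntimeClassNames {
//			if allowed == AllowAllRuntimeClassNames || allowed == name {
//				return true
//			}
//		}
//		return false
//	}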
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
type PodSecurityPolicyList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []PodSecurityPolicy
}

View File

@ -1,548 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package policy
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver.
func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver {
if in == nil {
return nil
}
out := new(AllowedCSIDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
if in == nil {
return nil
}
out := new(AllowedFlexVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
if in == nil {
return nil
}
out := new(AllowedHostPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Eviction) DeepCopyInto(out *Eviction) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.DeleteOptions != nil {
in, out := &in.DeleteOptions, &out.DeleteOptions
*out = new(v1.DeleteOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction.
func (in *Eviction) DeepCopy() *Eviction {
if in == nil {
return nil
}
out := new(Eviction)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Eviction) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
if in == nil {
return nil
}
out := new(FSGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
func (in *HostPortRange) DeepCopy() *HostPortRange {
if in == nil {
return nil
}
out := new(HostPortRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IDRange) DeepCopyInto(out *IDRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
func (in *IDRange) DeepCopy() *IDRange {
if in == nil {
return nil
}
out := new(IDRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget.
func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget {
if in == nil {
return nil
}
out := new(PodDisruptionBudget)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodDisruptionBudget, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList.
func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) {
*out = *in
if in.MinAvailable != nil {
in, out := &in.MinAvailable, &out.MinAvailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.UnhealthyPodEvictionPolicy != nil {
in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy
*out = new(UnhealthyPodEvictionPolicyType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec.
func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) {
*out = *in
if in.DisruptedPods != nil {
in, out := &in.DisruptedPods, &out.DisruptedPods
*out = make(map[string]v1.Time, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus.
func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
if in == nil {
return nil
}
out := new(PodSecurityPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodSecurityPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
if in == nil {
return nil
}
out := new(PodSecurityPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
*out = *in
if in.DefaultAddCapabilities != nil {
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.RequiredDropCapabilities != nil {
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.AllowedCapabilities != nil {
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]FSType, len(*in))
copy(*out, *in)
}
if in.HostPorts != nil {
in, out := &in.HostPorts, &out.HostPorts
*out = make([]HostPortRange, len(*in))
copy(*out, *in)
}
in.SELinux.DeepCopyInto(&out.SELinux)
in.RunAsUser.DeepCopyInto(&out.RunAsUser)
if in.RunAsGroup != nil {
in, out := &in.RunAsGroup, &out.RunAsGroup
*out = new(RunAsGroupStrategyOptions)
(*in).DeepCopyInto(*out)
}
in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
in.FSGroup.DeepCopyInto(&out.FSGroup)
if in.DefaultAllowPrivilegeEscalation != nil {
in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
*out = new(bool)
**out = **in
}
if in.AllowedHostPaths != nil {
in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
*out = make([]AllowedHostPath, len(*in))
copy(*out, *in)
}
if in.AllowedFlexVolumes != nil {
in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
*out = make([]AllowedFlexVolume, len(*in))
copy(*out, *in)
}
if in.AllowedCSIDrivers != nil {
in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers
*out = make([]AllowedCSIDriver, len(*in))
copy(*out, *in)
}
if in.AllowedUnsafeSysctls != nil {
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ForbiddenSysctls != nil {
in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowedProcMountTypes != nil {
in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes
*out = make([]core.ProcMountType, len(*in))
copy(*out, *in)
}
if in.RuntimeClass != nil {
in, out := &in.RuntimeClass, &out.RuntimeClass
*out = new(RuntimeClassStrategyOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsUserStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) {
*out = *in
if in.AllowedRuntimeClassNames != nil {
in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DefaultRuntimeClassName != nil {
in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions.
func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions {
if in == nil {
return nil
}
out := new(RuntimeClassStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
*out = *in
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
*out = new(core.SELinuxOptions)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
if in == nil {
return nil
}
out := new(SELinuxStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
if in == nil {
return nil
}
out := new(SupplementalGroupsStrategyOptions)
in.DeepCopyInto(out)
return out
}