Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

rebase: update kubernetes to 1.28.0 in main

Update kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: b2fdc269c3
Commit: ff3e84ad67
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/OWNERS (generated, vendored): 1 line changed
@@ -2,7 +2,6 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
vendor/k8s.io/kubernetes/pkg/apis/batch/types.go (generated, vendored): 92 lines changed
@@ -44,6 +44,13 @@ const (
JobNameLabel = labelPrefix + LegacyJobNameLabel
// Controller UID is used for selectors and labels for jobs
ControllerUidLabel = labelPrefix + LegacyControllerUidLabel
// Annotation indicating the number of failures for the index corresponding
// to the pod, which are counted towards the backoff limit.
JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count"
// Annotation indicating the number of failures for the index corresponding
// to the pod, which don't count towards the backoff limit, according to the
// pod failure policy. When the annotation is absent zero is implied.
JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -119,6 +126,12 @@ const (
// pod's job as Failed and terminate all running pods.
PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob"

// This is an action which might be taken on a pod failure - mark the
// Job's index as failed to avoid restarts within this index. This action
// can only be used when backoffLimitPerIndex is set.
// This value is alpha-level.
PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"

// This is an action which might be taken on a pod failure - the counter towards
// .backoffLimit, represented by the job's .status.failed field, is not
// incremented and a replacement pod is created.
@@ -138,6 +151,19 @@ const (
PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn"
)

// PodReplacementPolicy specifies the policy for creating pod replacements.
// +enum
type PodReplacementPolicy string

const (
// TerminatingOrFailed means that we recreate pods
// when they are terminating (has a metadata.deletionTimestamp) or failed.
TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed"
//Failed means to wait until a previously created Pod is fully terminated (has phase
//Failed or Succeeded) before creating a replacement Pod.
Failed PodReplacementPolicy = "Failed"
)

// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling
// a failed pod based on its container exit codes. In particular, it lookups the
// .state.terminated.exitCode for each app container and init container status,
@@ -195,6 +221,10 @@ type PodFailurePolicyRule struct {
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
// - FailIndex: indicates that the pod's index is marked as Failed and will
// not be restarted.
// This value is alpha-level. It can be used when the
// `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
@@ -251,8 +281,8 @@ type JobSpec struct {
// checked against the backoffLimit. This field cannot be used in combination
// with .spec.podTemplate.spec.restartPolicy=OnFailure.
//
// This field is alpha-level. To use this field, you must enable the
// `JobPodFailurePolicy` feature gate (disabled by default).
// This field is beta-level. It can be used when the `JobPodFailurePolicy`
// feature gate is enabled (enabled by default).
// +optional
PodFailurePolicy *PodFailurePolicy

@@ -269,6 +299,30 @@ type JobSpec struct {
// +optional
BackoffLimit *int32

// Specifies the limit for the number of retries within an
// index before marking this index as failed. When enabled the number of
// failures per index is kept in the pod's
// batch.kubernetes.io/job-index-failure-count annotation. It can only
// be set when Job's completionMode=Indexed, and the Pod's restart
// policy is Never. The field is immutable.
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// +optional
BackoffLimitPerIndex *int32

// Specifies the maximal number of failed indexes before marking the Job as
// failed, when backoffLimitPerIndex is set. Once the number of failed
// indexes exceeds this number the entire Job is marked as Failed and its
// execution is terminated. When left as null the job continues execution of
// all of its indexes and is marked with the `Complete` Job condition.
// It can only be specified when backoffLimitPerIndex is set.
// It can be null or up to completions. It is required and must be
// less than or equal to 10^4 when is completions greater than 10^5.
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// +optional
MaxFailedIndexes *int32

// TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed
// Optional number of failed pods to retain.
// +optional
@@ -340,6 +394,19 @@ type JobSpec struct {
//
// +optional
Suspend *bool

// podReplacementPolicy specifies when to create replacement Pods.
// Possible values are:
// - TerminatingOrFailed means that we recreate pods
// when they are terminating (has a metadata.deletionTimestamp) or failed.
// - Failed means to wait until a previously created Pod is fully terminated (has phase
// Failed or Succeeded) before creating a replacement Pod.
//
// When using podFailurePolicy, Failed is the the only allowed value.
// TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
// This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
// +optional
PodReplacementPolicy *PodReplacementPolicy
}

// JobStatus represents the current state of a Job.
@@ -372,6 +439,14 @@ type JobStatus struct {
// +optional
Active int32

// The number of pods which are terminating (in phase Pending or Running
// and have a deletionTimestamp).
//
// This field is alpha-level. The job controller populates the field when
// the feature gate JobPodReplacementPolicy is enabled (disabled by default).
// +optional
Terminating *int32

// The number of active pods which have a Ready condition.
//
// This field is beta-level. The job controller populates the field when
@@ -397,6 +472,19 @@ type JobStatus struct {
// +optional
CompletedIndexes string

// FailedIndexes holds the failed indexes when backoffLimitPerIndex=true.
// The indexes are represented in the text format analogous as for the
// `completedIndexes` field, ie. they are kept as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
// last element of the series, separated by a hyphen.
// For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
// feature gate is enabled (disabled by default).
// +optional
FailedIndexes *string

// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
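The new JobSpec fields above are meant to be used together: backoffLimitPerIndex and maxFailedIndexes only apply to Indexed Jobs whose pod restart policy is Never, and podReplacementPolicy controls when replacement pods appear. A minimal Go sketch, using only the internal k8s.io/kubernetes/pkg/apis/batch names shown in the hunks above; the concrete values are illustrative assumptions, not part of this commit:

package example

import batch "k8s.io/kubernetes/pkg/apis/batch"

// indexedJobSpec sketches the new alpha fields. BackoffLimitPerIndex requires
// completionMode=Indexed and restartPolicy=Never on the pod template (omitted
// here for brevity), per the field comments in the diff.
func indexedJobSpec() batch.JobSpec {
	backoffPerIndex := int32(2)  // retries allowed per index (JobBackoffLimitPerIndex gate)
	maxFailedIndexes := int32(5) // Job fails once the number of failed indexes exceeds this
	policy := batch.Failed       // replace pods only after they fully terminate

	return batch.JobSpec{
		BackoffLimitPerIndex: &backoffPerIndex,
		MaxFailedIndexes:     &maxFailedIndexes,
		PodReplacementPolicy: &policy, // JobPodReplacementPolicy gate (alpha)
	}
}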
vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go (generated, vendored): 25 lines changed
@@ -267,6 +267,16 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = new(int32)
**out = **in
}
if in.BackoffLimitPerIndex != nil {
in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex
*out = new(int32)
**out = **in
}
if in.MaxFailedIndexes != nil {
in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes
*out = new(int32)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
@@ -293,6 +303,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = new(bool)
**out = **in
}
if in.PodReplacementPolicy != nil {
in, out := &in.PodReplacementPolicy, &out.PodReplacementPolicy
*out = new(PodReplacementPolicy)
**out = **in
}
return
}

@@ -324,11 +339,21 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
if in.Terminating != nil {
in, out := &in.Terminating, &out.Terminating
*out = new(int32)
**out = **in
}
if in.Ready != nil {
in, out := &in.Ready, &out.Ready
*out = new(int32)
**out = **in
}
if in.FailedIndexes != nil {
in, out := &in.FailedIndexes, &out.FailedIndexes
*out = new(string)
**out = **in
}
if in.UncountedTerminatedPods != nil {
in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods
*out = new(UncountedTerminatedPods)
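Every new pointer field above receives the same generated copy block: allocate fresh storage, then copy the value, so the deep copy never aliases the source object. A standalone restatement of that pattern (not generated code, just an illustration):

// deepCopyInt32Ptr mirrors the allocate-and-copy pattern used by the generated
// DeepCopyInto blocks above for optional int32 fields such as BackoffLimitPerIndex.
func deepCopyInt32Ptr(in *int32) *int32 {
	if in == nil {
		return nil
	}
	out := new(int32) // fresh allocation
	*out = *in        // copy the value, not the pointer
	return out
}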
vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go (generated, vendored): 57 lines changed
@@ -360,6 +360,28 @@ func ContainsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.Persi
return false
}

func ClaimContainsAllocatedResources(pvc *core.PersistentVolumeClaim) bool {
if pvc == nil {
return false
}

if pvc.Status.AllocatedResources != nil {
return true
}
return false
}

func ClaimContainsAllocatedResourceStatus(pvc *core.PersistentVolumeClaim) bool {
if pvc == nil {
return false
}

if pvc.Status.AllocatedResourceStatuses != nil {
return true
}
return false
}

// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
// and converts it to the []Toleration type in core.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) {
@@ -453,41 +475,6 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {
return false
}

func toResourceNames(resources core.ResourceList) []core.ResourceName {
result := []core.ResourceName{}
for resourceName := range resources {
result = append(result, resourceName)
}
return result
}

func toSet(resourceNames []core.ResourceName) sets.String {
result := sets.NewString()
for _, resourceName := range resourceNames {
result.Insert(string(resourceName))
}
return result
}

// toContainerResourcesSet returns a set of resources names in container resource requirements
func toContainerResourcesSet(ctr *core.Container) sets.String {
resourceNames := toResourceNames(ctr.Resources.Requests)
resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...)
return toSet(resourceNames)
}

// ToPodResourcesSet returns a set of resource names in all containers in a pod.
func ToPodResourcesSet(podSpec *core.PodSpec) sets.String {
result := sets.NewString()
for i := range podSpec.InitContainers {
result = result.Union(toContainerResourcesSet(&podSpec.InitContainers[i]))
}
for i := range podSpec.Containers {
result = result.Union(toContainerResourcesSet(&podSpec.Containers[i]))
}
return result
}

// GetDeletionCostFromPodAnnotations returns the integer value of pod-deletion-cost. Returns 0
// if not set or the value is invalid.
func GetDeletionCostFromPodAnnotations(annotations map[string]string) (int32, error) {
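The two new helpers are nil-safe checks on PVC status; the validation.go change later in this commit uses them to keep honoring allocated-resource fields on objects that already carry them, even when the feature gate is off. A sketch of that call pattern, assuming only the helper package shown above (the function name here is hypothetical):

package example

import (
	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper"
)

// keepExpansionRecovery shows how a caller can combine the new helpers with a
// feature-gate decision; the real logic lives in ValidationOptionsForPersistentVolumeClaim.
func keepExpansionRecovery(oldPVC *core.PersistentVolumeClaim, gateEnabled bool) bool {
	if helper.ClaimContainsAllocatedResources(oldPVC) ||
		helper.ClaimContainsAllocatedResourceStatus(oldPVC) {
		return true // the old object already uses the fields, keep accepting them
	}
	return gateEnabled
}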
vendor/k8s.io/kubernetes/pkg/apis/core/install/OWNERS (generated, vendored): 1 line changed
@@ -1,7 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- lavalamp
- smarterclayton
- deads2k
- caesarxuchao
vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go (generated, vendored): 1 line changed
@@ -84,6 +84,7 @@ func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string,
"spec.schedulerName",
"status.phase",
"status.hostIP",
"status.hostIPs",
"status.podIP",
"status.podIPs":
return label, value, nil
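With "status.hostIPs" accepted by ConvertDownwardAPIFieldLabel, the new host IP list can be exposed through the downward API. A sketch using the versioned k8s.io/api/core/v1 types for illustration; whether the field is accepted also depends on the PodHostIPs feature gate (see the validation.go hunks near the end of this commit):

package example

import corev1 "k8s.io/api/core/v1"

// hostIPsEnv sketches an env var sourced from the newly allowed field label.
var hostIPsEnv = corev1.EnvVar{
	Name: "HOST_IPS",
	ValueFrom: &corev1.EnvVarSource{
		FieldRef: &corev1.ObjectFieldSelector{
			FieldPath: "status.hostIPs", // added to the allowed downward API paths above
		},
	},
}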
vendor/k8s.io/kubernetes/pkg/apis/core/types.go (generated, vendored): 218 lines changed
@@ -380,6 +380,12 @@ type PersistentVolumeStatus struct {
// Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI
// +optional
Reason string
// LastPhaseTransitionTime is the time the phase transitioned from one to another
// and automatically resets to current time everytime a volume phase transitions.
// This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.
// +featureGate=PersistentVolumeLastPhaseTransitionTime
// +optional
LastPhaseTransitionTime *metav1.Time
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -515,23 +521,27 @@ const (
)

// +enum
type PersistentVolumeClaimResizeStatus string
// When a controller receives persistentvolume claim update with ClaimResourceStatus for a resource
// that it does not recognizes, then it should ignore that update and let other controllers
// handle it.
type ClaimResourceStatus string

const (
// When expansion is complete, the empty string is set by resize controller or kubelet.
PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = ""
// State set when resize controller starts expanding the volume in control-plane
PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress"
// State set when expansion has failed in resize controller with a terminal error.
// Transient errors such as timeout should not set this status and should leave ResizeStatus
// State set when resize controller starts resizing the volume in control-plane
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"

// State set when resize has failed in resize controller with a terminal error.
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
// unmodified, so as resize controller can resume the volume expansion.
PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed"
// State set when resize controller has finished expanding the volume but further expansion is needed on the node.
PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending"
// State set when kubelet starts expanding the volume.
PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress"
// State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed.
PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed"
PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"

// State set when resize controller has finished resizing the volume but further resizing of volume
// is needed on the node.
PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
// State set when kubelet starts resizing the volume.
PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
// State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed
PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
)

// PersistentVolumeClaimCondition represents the current condition of PV claim
@@ -561,24 +571,70 @@ type PersistentVolumeClaimStatus struct {
Capacity ResourceList
// +optional
Conditions []PersistentVolumeClaimCondition
// The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may
// be larger than the actual capacity when a volume expansion operation is requested.
// AllocatedResources tracks the resources allocated to a PVC including its capacity.
// Key names follow standard Kubernetes label syntax. Valid values are either:
// * Un-prefixed keys:
// - storage - the capacity of the volume.
// * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
// Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
// reserved and hence may not be used.
//
// Capacity reported here may be larger than the actual capacity when a volume expansion operation
// is requested.
// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
// If a volume expansion capacity request is lowered, allocatedResources is only
// lowered if there are no expansion operations in progress and if the actual volume capacity
// is equal or lower than the requested capacity.
//
// A controller that receives PVC update with previously unknown resourceName
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
//
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
// +optional
AllocatedResources ResourceList
// ResizeStatus stores status of resize operation.
// ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty
// string by resize controller or kubelet.
// AllocatedResourceStatuses stores status of resource being resized for the given PVC.
// Key names follow standard Kubernetes label syntax. Valid values are either:
// * Un-prefixed keys:
// - storage - the capacity of the volume.
// * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
// Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
// reserved and hence may not be used.
//
// ClaimResourceStatus can be in any of following states:
// - ControllerResizeInProgress:
// State set when resize controller starts resizing the volume in control-plane.
// - ControllerResizeFailed:
// State set when resize has failed in resize controller with a terminal error.
// - NodeResizePending:
// State set when resize controller has finished resizing the volume but further resizing of
// volume is needed on the node.
// - NodeResizeInProgress:
// State set when kubelet starts resizing the volume.
// - NodeResizeFailed:
// State set when resizing has failed in kubelet with a terminal error. Transient errors don't set
// NodeResizeFailed.
// For example: if expanding a PVC for more capacity - this field can be one of the following states:
// - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
// - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
// - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
// - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
// - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
// When this field is not set, it means that no resize operation is in progress for the given PVC.
//
// A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
//
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
// +mapType=granular
// +optional
ResizeStatus *PersistentVolumeClaimResizeStatus
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus
}

// PersistentVolumeAccessMode defines various access modes for PV.
@@ -2279,6 +2335,24 @@ type Container struct {
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
// RestartPolicy defines the restart behavior of individual containers in a pod.
// This field may only be set for init containers, and the only allowed value is "Always".
// For non-init containers or when this field is not specified,
// the restart behavior is defined by the Pod's restart policy and the container type.
// Setting the RestartPolicy as "Always" for the init container will have the following effect:
// this init container will be continually restarted on
// exit until all regular containers have terminated. Once all regular
// containers have completed, all init containers with restartPolicy "Always"
// will be shut down. This lifecycle differs from normal init containers and
// is often referred to as a "sidecar" container. Although this init
// container still starts in the init container sequence, it does not wait
// for the container to complete before proceeding to the next init
// container. Instead, the next init container starts immediately after this
// init container is started, or after any startupProbe has successfully
// completed.
// +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy
// +optional
VolumeMounts []VolumeMount
// volumeDevices is the list of block devices to be used by the container.
@@ -2597,6 +2671,14 @@ const (
RestartPolicyNever RestartPolicy = "Never"
)

// ContainerRestartPolicy is the restart policy for a single container.
// This may only be set for init containers and only allowed value is "Always".
type ContainerRestartPolicy string

const (
ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
@@ -3184,15 +3266,9 @@ type ClaimSource struct {
//
// The template will be used to create a new ResourceClaim, which will
// be bound to this pod. When this pod is deleted, the ResourceClaim
// will also be deleted. The name of the ResourceClaim will be <pod
// name>-<resource name>, where <resource name> is the
// PodResourceClaim.Name. Pod validation will reject the pod if the
// concatenated name is not valid for a ResourceClaim (e.g. too long).
//
// An existing ResourceClaim with that name that is not owned by the
// pod will not be used for the pod to avoid using an unrelated
// resource by mistake. Scheduling and pod startup are then blocked
// until the unrelated ResourceClaim is removed.
// will also be deleted. The pod name and resource name, along with a
// generated component, will be used to form a unique name for the
// ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
//
// This field is immutable and no changes will be made to the
// corresponding ResourceClaim by the control plane after creating the
@@ -3200,6 +3276,22 @@ type ClaimSource struct {
ResourceClaimTemplateName *string
}

// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
// which references a ResourceClaimTemplate. It stores the generated name for
// the corresponding ResourceClaim.
type PodResourceClaimStatus struct {
// Name uniquely identifies this resource claim inside the pod.
// This must match the name of an entry in pod.spec.resourceClaims,
// which implies that the string must be a DNS_LABEL.
Name string

// ResourceClaimName is the name of the ResourceClaim that was
// generated for the Pod in the namespace of the Pod. It this is
// unset, then generating a ResourceClaim was not necessary. The
// pod.spec.resourceClaims entry can be ignored in this case.
ResourceClaimName *string
}

// OSName is the set of OS'es that can be used in OS.
type OSName string

@@ -3446,12 +3538,15 @@ type PodDNSConfigOption struct {
Value *string
}

// PodIP represents the IP address of a pod.
// IP address information. Each entry includes:
//
// IP: An IP address allocated to the pod. Routable at least within
// the cluster.
// PodIP represents a single IP address allocated to the pod.
type PodIP struct {
// IP is the IP address assigned to the pod
IP string
}

// HostIP represents a single IP address allocated to the host.
type HostIP struct {
// IP is the IP address assigned to the host
IP string
}

@@ -3505,6 +3600,13 @@ type EphemeralContainerCommon struct {
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
// Restart policy for the container to manage the restart behavior of each
// container within a pod.
// This may only be set for init containers. You cannot set this field on
// ephemeral containers.
// +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// +optional
VolumeMounts []VolumeMount
@@ -3594,9 +3696,21 @@ type PodStatus struct {
// give the resources on this node to a higher priority pod that is created after preemption.
// +optional
NominatedNodeName string

// HostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
// A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will
// not be updated even if there is a node is assigned to pod
// +optional
HostIP string

// HostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
// match the hostIP field. This list is empty if the pod has not started yet.
// A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will
// not be updated even if there is a node is assigned to this pod.
// match the hostIP field. This list is empty if no IPs have been allocated yet.
// +optional
HostIPs []HostIP

// PodIPs holds all of the known IP addresses allocated to the pod. Pods may be assigned AT MOST
// one value for each of IPv4 and IPv6.
// +optional
@@ -3628,6 +3742,11 @@ type PodStatus struct {
// +featureGate=InPlacePodVerticalScaling
// +optional
Resize PodResizeStatus

// Status of resource claims.
// +featureGate=DynamicResourceAllocation
// +optional
ResourceClaimStatuses []PodResourceClaimStatus
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -4083,10 +4202,9 @@ type ServiceSpec struct {
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
// Deprecated: This field was under-specified and its meaning varies across implementations,
// and it cannot support dual-stack.
// As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
// This field may be removed in a future API version.
// Deprecated: This field was under-specified and its meaning varies across implementations.
// Using it is non-portable and it may not support dual-stack.
// Users are encouraged to use implementation-specific annotations when available.
// +optional
LoadBalancerIP string

@@ -4193,6 +4311,8 @@ type ServicePort struct {
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
@@ -4347,10 +4467,19 @@ type EndpointPort struct {
Protocol Protocol

// The application protocol for this port.
// This is used as a hint for implementations to offer richer behavior for protocols that they understand.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// Valid values are either:
//
// * Un-prefixed protocol names - reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
// * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
// * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
// +optional
AppProtocol *string
@@ -5803,12 +5932,9 @@ type WindowsSecurityContextOptions struct {
RunAsUserName *string

// HostProcess determines if a container should be run as a 'Host Process' container.
// This field is alpha-level and will only be honored by components that enable the
// WindowsHostProcessContainers feature flag. Setting this field without the feature
// flag will result in errors when validating the Pod. All of a Pod's containers must
// have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
// containers and non-HostProcess containers). In addition, if HostProcess is true
// then HostNetwork must also be set to true.
// All of a Pod's containers must have the same effective HostProcess value
// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
// In addition, if HostProcess is true then HostNetwork must also be set to true.
// +optional
HostProcess *bool
}
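The PVC status change above replaces the single resizeStatus pointer with a per-resource map keyed by resource name. A sketch of how a status consumer might read it, using only names that appear in the types.go hunks (the helper function itself is hypothetical):

package example

import core "k8s.io/kubernetes/pkg/apis/core"

// storageResizeInProgress reports whether a resize of the "storage" resource is
// currently underway, per the new AllocatedResourceStatuses map.
func storageResizeInProgress(pvc *core.PersistentVolumeClaim) bool {
	status, ok := pvc.Status.AllocatedResourceStatuses[core.ResourceStorage]
	if !ok {
		return false // no resize operation recorded for storage
	}
	return status == core.PersistentVolumeClaimControllerResizeInProgress ||
		status == core.PersistentVolumeClaimNodeResizeInProgress
}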
vendor/k8s.io/kubernetes/pkg/apis/core/v1/OWNERS (generated, vendored): 1 line changed
@@ -2,7 +2,6 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go (generated, vendored): 1 line changed
@@ -42,6 +42,7 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
"spec.restartPolicy",
"spec.schedulerName",
"spec.serviceAccountName",
"spec.hostNetwork",
"status.phase",
"status.podIP",
"status.podIPs",
vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go (generated, vendored): 17 lines changed
@@ -118,8 +118,8 @@ func SetDefaults_Service(obj *v1.Service) {
if sp.Protocol == "" {
sp.Protocol = v1.ProtocolTCP
}
if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") {
sp.TargetPort = intstr.FromInt(int(sp.Port))
if sp.TargetPort == intstr.FromInt32(0) || sp.TargetPort == intstr.FromString("") {
sp.TargetPort = intstr.FromInt32(sp.Port)
}
}
// Defaults ExternalTrafficPolicy field for NodePort / LoadBalancer service
@@ -199,6 +199,11 @@ func SetDefaults_Pod(obj *v1.Pod) {
enableServiceLinks := v1.DefaultEnableServiceLinks
obj.Spec.EnableServiceLinks = &enableServiceLinks
}

if obj.Spec.HostNetwork {
defaultHostNetworkPorts(&obj.Spec.Containers)
defaultHostNetworkPorts(&obj.Spec.InitContainers)
}
}
func SetDefaults_PodSpec(obj *v1.PodSpec) {
// New fields added here will break upgrade tests:
@@ -211,9 +216,11 @@ func SetDefaults_PodSpec(obj *v1.PodSpec) {
if obj.RestartPolicy == "" {
obj.RestartPolicy = v1.RestartPolicyAlways
}
if obj.HostNetwork {
defaultHostNetworkPorts(&obj.Containers)
defaultHostNetworkPorts(&obj.InitContainers)
if utilfeature.DefaultFeatureGate.Enabled(features.DefaultHostNetworkHostPortsInPodTemplates) {
if obj.HostNetwork {
defaultHostNetworkPorts(&obj.Containers)
defaultHostNetworkPorts(&obj.InitContainers)
}
}
if obj.SecurityContext == nil {
obj.SecurityContext = &v1.PodSecurityContext{}
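The service defaulting above swaps intstr.FromInt for the newer intstr.FromInt32, which takes the ServicePort's int32 directly instead of going through an int conversion. A sketch of the equivalent defaulting step in isolation (the function name is illustrative):

package example

import "k8s.io/apimachinery/pkg/util/intstr"

// defaultTargetPort mirrors the SetDefaults_Service change: an unset TargetPort
// falls back to the port number, now built with FromInt32.
func defaultTargetPort(port int32, target intstr.IntOrString) intstr.IntOrString {
	if target == intstr.FromInt32(0) || target == intstr.FromString("") {
		return intstr.FromInt32(port)
	}
	return target
}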
vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go (generated, vendored): 6 lines changed
@@ -315,12 +315,6 @@ func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration)
return true
}

// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list.
// Returns true if something was updated, false otherwise.
func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool {
return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration)
}

// GetMatchingTolerations returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise.
func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) {
if len(taints) == 0 {
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go (generated, vendored): 76 lines changed
@@ -732,6 +732,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.HostIP)(nil), (*core.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HostIP_To_core_HostIP(a.(*v1.HostIP), b.(*core.HostIP), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.HostIP)(nil), (*v1.HostIP)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_HostIP_To_v1_HostIP(a.(*core.HostIP), b.(*v1.HostIP), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.HostPathVolumeSource)(nil), (*core.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(a.(*v1.HostPathVolumeSource), b.(*core.HostPathVolumeSource), scope)
}); err != nil {
@@ -1382,6 +1392,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodResourceClaimStatus)(nil), (*core.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(a.(*v1.PodResourceClaimStatus), b.(*core.PodResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.PodResourceClaimStatus)(nil), (*v1.PodResourceClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(a.(*core.PodResourceClaimStatus), b.(*v1.PodResourceClaimStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*v1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope)
}); err != nil {
@@ -2986,6 +3006,7 @@ func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Cont
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*core.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
@@ -3020,6 +3041,7 @@ func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Cont
return err
}
out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*v1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe))
@@ -3602,6 +3624,7 @@ func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*core.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
@@ -3636,6 +3659,7 @@ func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in
return err
}
out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.RestartPolicy = (*v1.ContainerRestartPolicy)(unsafe.Pointer(in.RestartPolicy))
out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe))
@@ -4119,6 +4143,26 @@ func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlia
return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s)
}

func autoConvert_v1_HostIP_To_core_HostIP(in *v1.HostIP, out *core.HostIP, s conversion.Scope) error {
out.IP = in.IP
return nil
}

// Convert_v1_HostIP_To_core_HostIP is an autogenerated conversion function.
func Convert_v1_HostIP_To_core_HostIP(in *v1.HostIP, out *core.HostIP, s conversion.Scope) error {
return autoConvert_v1_HostIP_To_core_HostIP(in, out, s)
}

func autoConvert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *v1.HostIP, s conversion.Scope) error {
out.IP = in.IP
return nil
}

// Convert_core_HostIP_To_v1_HostIP is an autogenerated conversion function.
func Convert_core_HostIP_To_v1_HostIP(in *core.HostIP, out *v1.HostIP, s conversion.Scope) error {
return autoConvert_core_HostIP_To_v1_HostIP(in, out, s)
}

func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error {
out.Path = in.Path
out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type))
@@ -5318,7 +5362,7 @@ func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimSta
out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.ResizeStatus = (*core.PersistentVolumeClaimResizeStatus)(unsafe.Pointer(in.ResizeStatus))
out.AllocatedResourceStatuses = *(*map[core.ResourceName]core.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses))
return nil
}

@@ -5333,7 +5377,7 @@ func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimSta
out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.ResizeStatus = (*v1.PersistentVolumeClaimResizeStatus)(unsafe.Pointer(in.ResizeStatus))
out.AllocatedResourceStatuses = *(*map[v1.ResourceName]v1.ClaimResourceStatus)(unsafe.Pointer(&in.AllocatedResourceStatuses))
return nil
}

@@ -5528,6 +5572,7 @@ func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1
out.Phase = core.PersistentVolumePhase(in.Phase)
out.Message = in.Message
out.Reason = in.Reason
out.LastPhaseTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastPhaseTransitionTime))
return nil
}

@@ -5540,6 +5585,7 @@ func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *co
out.Phase = v1.PersistentVolumePhase(in.Phase)
out.Message = in.Message
out.Reason = in.Reason
out.LastPhaseTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastPhaseTransitionTime))
return nil
}

@@ -6207,6 +6253,28 @@ func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceCl
return autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in, out, s)
}

func autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *v1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error {
out.Name = in.Name
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
return nil
}

// Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus is an autogenerated conversion function.
func Convert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in *v1.PodResourceClaimStatus, out *core.PodResourceClaimStatus, s conversion.Scope) error {
return autoConvert_v1_PodResourceClaimStatus_To_core_PodResourceClaimStatus(in, out, s)
}

func autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *v1.PodResourceClaimStatus, s conversion.Scope) error {
out.Name = in.Name
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
return nil
}

// Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus is an autogenerated conversion function.
func Convert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in *core.PodResourceClaimStatus, out *v1.PodResourceClaimStatus, s conversion.Scope) error {
return autoConvert_core_PodResourceClaimStatus_To_v1_PodResourceClaimStatus(in, out, s)
}

func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
out.Name = in.Name
return nil
@@ -6413,6 +6481,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.HostIPs = *(*[]core.HostIP)(unsafe.Pointer(&in.HostIPs))
// WARNING: in.PodIP requires manual conversion: does not exist in peer-type
out.PodIPs = *(*[]core.PodIP)(unsafe.Pointer(&in.PodIPs))
out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime))
@@ -6421,6 +6490,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS
out.QOSClass = core.PodQOSClass(in.QOSClass)
out.EphemeralContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = core.PodResizeStatus(in.Resize)
out.ResourceClaimStatuses = *(*[]core.PodResourceClaimStatus)(unsafe.Pointer(&in.ResourceClaimStatuses))
return nil
}

@@ -6431,6 +6501,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS
out.Reason = in.Reason
out.NominatedNodeName = in.NominatedNodeName
out.HostIP = in.HostIP
out.HostIPs = *(*[]v1.HostIP)(unsafe.Pointer(&in.HostIPs))
out.PodIPs = *(*[]v1.PodIP)(unsafe.Pointer(&in.PodIPs))
out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime))
out.QOSClass = v1.PodQOSClass(in.QOSClass)
@@ -6438,6 +6509,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS
out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.EphemeralContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = v1.PodResizeStatus(in.Resize)
out.ResourceClaimStatuses = *(*[]v1.PodResourceClaimStatus)(unsafe.Pointer(&in.ResourceClaimStatuses))
return nil
}

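The generated conversions above are plain exported functions that RegisterConversions also wires into the runtime scheme; they can be called directly when converting a single object. A sketch for the new HostIP type, assuming the package import paths used by this vendor tree; a nil scope is sufficient here because only one field is copied:

package example

import (
	corev1 "k8s.io/api/core/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	apicorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

// toInternalHostIP converts a versioned HostIP to the internal type via the
// generated conversion function registered above.
func toInternalHostIP(in corev1.HostIP) (core.HostIP, error) {
	var out core.HostIP
	err := apicorev1.Convert_v1_HostIP_To_core_HostIP(&in, &out, nil)
	return out, err
}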
vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS (generated, vendored): 1 line changed
@@ -2,7 +2,6 @@
reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
461
vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
generated
vendored
461
vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
generated
vendored
@ -1055,6 +1055,7 @@ func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *fi
|
||||
if file.ResourceFieldRef != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
|
||||
}
|
||||
allErrs = append(allErrs, validateDownwardAPIHostIPs(file.FieldRef, fldPath.Child("fieldRef"), opts)...)
|
||||
} else if file.ResourceFieldRef != nil {
|
||||
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
|
||||
allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), true)...)
|
||||
@ -2019,18 +2020,15 @@ type PersistentVolumeClaimSpecValidationOptions struct {
|
||||
AllowReadWriteOncePod bool
|
||||
// Allow users to recover from previously failing expansion operation
|
||||
EnableRecoverFromExpansionFailure bool
|
||||
// Allow assigning StorageClass to unbound PVCs retroactively
|
||||
EnableRetroactiveDefaultStorageClass bool
|
||||
// Allow to validate the label value of the label selector
|
||||
AllowInvalidLabelValueInSelector bool
|
||||
}
|
||||
|
||||
func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions {
|
||||
opts := PersistentVolumeClaimSpecValidationOptions{
|
||||
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
|
||||
EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure),
|
||||
EnableRetroactiveDefaultStorageClass: utilfeature.DefaultFeatureGate.Enabled(features.RetroactiveDefaultStorageClass),
|
||||
AllowInvalidLabelValueInSelector: false,
|
||||
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
|
||||
EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure),
|
||||
AllowInvalidLabelValueInSelector: false,
|
||||
}
|
||||
if oldPvc == nil {
|
||||
// If there's no old PVC, use the options based solely on feature enablement
|
||||
@ -2048,6 +2046,11 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum
|
||||
// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
|
||||
opts.AllowReadWriteOncePod = true
|
||||
}
|
||||
|
||||
if helper.ClaimContainsAllocatedResources(oldPvc) ||
|
||||
helper.ClaimContainsAllocatedResourceStatus(oldPvc) {
|
||||
opts.EnableRecoverFromExpansionFailure = true
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
@ -2286,24 +2289,39 @@ func validateStorageClassUpgradeFromAnnotation(oldAnnotations, newAnnotations ma
|
||||
|
||||
// Provide an upgrade path from PVC with nil storage class. We allow update of
|
||||
// StorageClassName only if following four conditions are met at the same time:
|
||||
// 1. RetroactiveDefaultStorageClass FeatureGate is enabled
|
||||
// 2. The new pvc's StorageClassName is not nil
|
||||
// 3. The old pvc's StorageClassName is nil
|
||||
// 4. The old pvc either does not have beta annotation set, or the beta annotation matches new pvc's StorageClassName
|
||||
// 1. The new pvc's StorageClassName is not nil
|
||||
// 2. The old pvc's StorageClassName is nil
|
||||
// 3. The old pvc either does not have beta annotation set, or the beta annotation matches new pvc's StorageClassName
|
||||
func validateStorageClassUpgradeFromNil(oldAnnotations map[string]string, oldScName, newScName *string, opts PersistentVolumeClaimSpecValidationOptions) bool {
|
||||
oldAnnotation, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
|
||||
return opts.EnableRetroactiveDefaultStorageClass /* condition 1 */ &&
|
||||
newScName != nil /* condition 2 */ &&
|
||||
oldScName == nil /* condition 3 */ &&
|
||||
(!oldAnnotationExist || *newScName == oldAnnotation) /* condition 4 */
|
||||
return newScName != nil /* condition 1 */ &&
|
||||
oldScName == nil /* condition 2 */ &&
|
||||
(!oldAnnotationExist || *newScName == oldAnnotation) /* condition 3 */
|
||||
}

var resizeStatusSet = sets.NewString(string(core.PersistentVolumeClaimNoExpansionInProgress),
string(core.PersistentVolumeClaimControllerExpansionInProgress),
string(core.PersistentVolumeClaimControllerExpansionFailed),
string(core.PersistentVolumeClaimNodeExpansionPending),
string(core.PersistentVolumeClaimNodeExpansionInProgress),
string(core.PersistentVolumeClaimNodeExpansionFailed))
func validatePersistentVolumeClaimResourceKey(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
}
if len(allErrs) != 0 {
return allErrs
}
// For native resource names such as - either unprefixed names or with kubernetes.io prefix,
// only allowed value is storage
if helper.IsNativeResource(core.ResourceName(value)) {
if core.ResourceName(value) != core.ResourceStorage {
return append(allErrs, field.NotSupported(fldPath, value, []string{string(core.ResourceStorage)}))
}
}
return allErrs
}

var resizeStatusSet = sets.NewString(string(core.PersistentVolumeClaimControllerResizeInProgress),
string(core.PersistentVolumeClaimControllerResizeFailed),
string(core.PersistentVolumeClaimNodeResizePending),
string(core.PersistentVolumeClaimNodeResizeInProgress),
string(core.PersistentVolumeClaimNodeResizeFailed))

// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, validationOpts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
@ -2320,19 +2338,26 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVo
allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
}
if validationOpts.EnableRecoverFromExpansionFailure {
resizeStatusPath := field.NewPath("status", "resizeStatus")
if newPvc.Status.ResizeStatus != nil {
resizeStatus := *newPvc.Status.ResizeStatus
if !resizeStatusSet.Has(string(resizeStatus)) {
allErrs = append(allErrs, field.NotSupported(resizeStatusPath, resizeStatus, resizeStatusSet.List()))
resizeStatusPath := field.NewPath("status", "allocatedResourceStatus")
if newPvc.Status.AllocatedResourceStatuses != nil {
resizeStatus := newPvc.Status.AllocatedResourceStatuses
for k, v := range resizeStatus {
if errs := validatePersistentVolumeClaimResourceKey(k.String(), resizeStatusPath); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
if !resizeStatusSet.Has(string(v)) {
allErrs = append(allErrs, field.NotSupported(resizeStatusPath, k, resizeStatusSet.List()))
continue
}
}
}
allocPath := field.NewPath("status", "allocatedResources")
for r, qty := range newPvc.Status.AllocatedResources {
if r != core.ResourceStorage {
allErrs = append(allErrs, field.NotSupported(allocPath, r, []string{string(core.ResourceStorage)}))
if errs := validatePersistentVolumeClaimResourceKey(r.String(), allocPath); len(errs) > 0 {
allErrs = append(allErrs, errs...)
continue
}

if errs := validateBasicResource(qty, allocPath.Key(string(r))); len(errs) > 0 {
allErrs = append(allErrs, errs...)
} else {
@ -2408,8 +2433,10 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString(
"spec.nodeName",
"spec.serviceAccountName",
"status.hostIP",
"status.hostIPs",
"status.podIP",
"status.podIPs")
"status.podIPs",
)

var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage")

@ -2430,6 +2457,7 @@ func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValida
if ev.ValueFrom.FieldRef != nil {
numSources++
allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...)
allErrs = append(allErrs, validateDownwardAPIHostIPs(ev.ValueFrom.FieldRef, fldPath.Child("fieldRef"), opts)...)
}
if ev.ValueFrom.ResourceFieldRef != nil {
numSources++
@ -2493,6 +2521,16 @@ func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets
return allErrs
}

func validateDownwardAPIHostIPs(fieldSel *core.ObjectFieldSelector, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if !opts.AllowHostIPsField {
if fieldSel.FieldPath == "status.hostIPs" {
allErrs = append(allErrs, field.Forbidden(fldPath, "may not be set when feature gate 'PodHostIPs' is not enabled"))
}
}
return allErrs
}
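
For context, a small sketch (not part of the vendored tree) of the downward-API usage this helper gates, written against the external k8s.io/api/core/v1 types; it assumes the PodHostIPs feature gate (surfaced above as opts.AllowHostIPsField) is enabled on the target cluster:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// An env var fed from the new status.hostIPs field; with the gate off,
	// the validation above rejects this fieldRef as Forbidden.
	env := corev1.EnvVar{
		Name: "HOST_IPS",
		ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.hostIPs"},
		},
	}
	fmt.Println(env.Name, "->", env.ValueFrom.FieldRef.FieldPath)
}
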

func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, prefixes *sets.String, fldPath *field.Path, volume bool) field.ErrorList {
allErrs := field.ErrorList{}

@ -2821,6 +2859,45 @@ func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field
return allErrs
}

func validateLivenessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateProbe(probe, fldPath)...)
if probe.SuccessThreshold != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1"))
}
return allErrs
}

func validateReadinessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateProbe(probe, fldPath)...)
if probe.TerminationGracePeriodSeconds != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), probe.TerminationGracePeriodSeconds, "must not be set for readinessProbes"))
}
return allErrs
}

func validateStartupProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

if probe == nil {
return allErrs
}
allErrs = append(allErrs, validateProbe(probe, fldPath)...)
if probe.SuccessThreshold != 1 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1"))
}
return allErrs
}

func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

@ -2840,6 +2917,23 @@ func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
return allErrs
}

func validateInitContainerRestartPolicy(restartPolicy *core.ContainerRestartPolicy, fldPath *field.Path) field.ErrorList {
var allErrors field.ErrorList

if restartPolicy == nil {
return allErrors
}
switch *restartPolicy {
case core.ContainerRestartPolicyAlways:
break
default:
validValues := []string{string(core.ContainerRestartPolicyAlways)}
allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
}

return allErrors
}
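
A hedged sketch of the kind of container this helper is about: a restartable ("sidecar") init container whose restartPolicy is Always, the only value the switch above accepts. Names and image are hypothetical, and the external k8s.io/api/core/v1 types are used instead of the internal ones:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	always := corev1.ContainerRestartPolicyAlways
	sidecar := corev1.Container{
		Name:          "log-shipper",                          // hypothetical name
		Image:         "registry.example.com/log-shipper:1.0", // hypothetical image
		RestartPolicy: &always,
		// With RestartPolicy=Always the validation further down allows probes
		// and lifecycle hooks on an init container; without it they are forbidden.
		ReadinessProbe: &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				Exec: &corev1.ExecAction{Command: []string{"cat", "/tmp/ready"}},
			},
		},
	}
	fmt.Println(sidecar.Name, *sidecar.RestartPolicy)
}
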

type commonHandler struct {
Exec *core.ExecAction
HTTPGet *core.HTTPGetAction
@ -2970,7 +3064,7 @@ func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) fie
return ValidatePortNumOrName(tcp.Port, fldPath.Child("port"))
}
func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList {
return ValidatePortNumOrName(intstr.FromInt(int(grpc.Port)), fldPath.Child("port"))
return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port"))
}
func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList {
numHandlers := 0
@ -3170,6 +3264,13 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
// Apply the validation common to all container types
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts)...)

restartAlways := false
// Apply the validation specific to init containers
if ctr.RestartPolicy != nil {
allErrs = append(allErrs, validateInitContainerRestartPolicy(ctr.RestartPolicy, idxPath.Child("restartPolicy"))...)
restartAlways = *ctr.RestartPolicy == core.ContainerRestartPolicyAlways
}

// Names must be unique within regular and init containers. Collisions with ephemeral containers
// will be detected by validateEphemeralContainers().
if allNames.Has(ctr.Name) {
@ -3181,19 +3282,31 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
// Check for port conflicts in init containers individually since init containers run one-by-one.
allErrs = append(allErrs, checkHostPortConflicts([]core.Container{ctr}, fldPath)...)

// These fields are disallowed for init containers.
if ctr.Lifecycle != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers"))
}
if ctr.LivenessProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers"))
}
if ctr.ReadinessProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("readinessProbe"), "may not be set for init containers"))
}
if ctr.StartupProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers"))
switch {
case restartAlways:
if ctr.Lifecycle != nil {
allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...)
}
allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...)
allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...)
allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...)

default:
// These fields are disallowed for init containers.
if ctr.Lifecycle != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers without restartPolicy=Always"))
}
if ctr.LivenessProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers without restartPolicy=Always"))
}
if ctr.ReadinessProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("readinessProbe"), "may not be set for init containers without restartPolicy=Always"))
}
if ctr.StartupProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers without restartPolicy=Always"))
}
}

if len(ctr.ResizePolicy) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("resizePolicy"), ctr.ResizePolicy, "must not be set for init containers"))
}
@ -3256,25 +3369,6 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList
return allErrs
}

// For now only these volumes are supported:
// - configmap
// - secret
// - downwardAPI
// - emptyDir
// - projected
// So reject anything else.
for i, vol := range spec.Volumes {
switch {
case vol.EmptyDir != nil:
case vol.Secret != nil:
case vol.DownwardAPI != nil:
case vol.ConfigMap != nil:
case vol.Projected != nil:
default:
allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumes").Index(i), "volume type not supported when `pod.Spec.HostUsers` is false"))
}
}

// We decided to restrict the usage of userns with other host namespaces:
// https://github.com/kubernetes/kubernetes/pull/111090#discussion_r935994282
// The tl;dr is: you can easily run into permission issues that seem unexpected, we don't
@ -3318,22 +3412,20 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol
allNames.Insert(ctr.Name)
}

// These fields are only allowed for regular containers, so only check supported values here.
// Init and ephemeral container validation will return field.Forbidden() for these paths.
// These fields are allowed for regular containers and restartable init
// containers.
// Regular init container and ephemeral container validation will return
// field.Forbidden() for these paths.
if ctr.Lifecycle != nil {
allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, path.Child("lifecycle"))...)
}
allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, path.Child("livenessProbe"))...)
if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 {
allErrs = append(allErrs, field.Invalid(path.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1"))
}
allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, path.Child("readinessProbe"))...)
if ctr.ReadinessProbe != nil && ctr.ReadinessProbe.TerminationGracePeriodSeconds != nil {
allErrs = append(allErrs, field.Invalid(path.Child("readinessProbe", "terminationGracePeriodSeconds"), ctr.ReadinessProbe.TerminationGracePeriodSeconds, "must not be set for readinessProbes"))
}
allErrs = append(allErrs, validateProbe(ctr.StartupProbe, path.Child("startupProbe"))...)
if ctr.StartupProbe != nil && ctr.StartupProbe.SuccessThreshold != 1 {
allErrs = append(allErrs, field.Invalid(path.Child("startupProbe", "successThreshold"), ctr.StartupProbe.SuccessThreshold, "must be 1"))
allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, path.Child("livenessProbe"))...)
allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, path.Child("readinessProbe"))...)
allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, path.Child("startupProbe"))...)

// These fields are disallowed for regular containers
if ctr.RestartPolicy != nil {
allErrs = append(allErrs, field.Forbidden(path.Child("restartPolicy"), "may not be set for non-init containers"))
}
}

@ -3399,14 +3491,10 @@ const (
// restrictions in Linux libc name resolution handling.
// Max number of DNS name servers.
MaxDNSNameservers = 3
// Expanded max number of domains in the search path list.
MaxDNSSearchPathsExpanded = 32
// Expanded max number of characters in the search path.
MaxDNSSearchListCharsExpanded = 2048
// Max number of domains in the search path list.
MaxDNSSearchPathsLegacy = 6
// Max number of characters in the search path list.
MaxDNSSearchListCharsLegacy = 256
MaxDNSSearchPaths = 32
// Max number of characters in the search path.
MaxDNSSearchListChars = 2048
)
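
The legacy 6-path/256-character limits and their "expanded" counterparts collapse into the single pair above. A small self-contained sketch of the resulting rule (constants inlined, helper name hypothetical):

package main

import (
	"fmt"
	"strings"
)

// searchListWithinLimits mirrors the collapsed limits above: at most 32 search
// paths and at most 2048 characters once the paths are joined with spaces.
func searchListWithinLimits(searches []string) bool {
	const maxPaths, maxChars = 32, 2048
	return len(searches) <= maxPaths && len(strings.Join(searches, " ")) <= maxChars
}

func main() {
	fmt.Println(searchListWithinLimits([]string{"ns.svc.cluster.local", "svc.cluster.local", "cluster.local"})) // true
}
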

func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *field.Path) field.ErrorList {
@ -3455,16 +3543,12 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic
}
}
// Validate searches.
maxDNSSearchPaths, maxDNSSearchListChars := MaxDNSSearchPathsLegacy, MaxDNSSearchListCharsLegacy
if opts.AllowExpandedDNSConfig {
maxDNSSearchPaths, maxDNSSearchListChars = MaxDNSSearchPathsExpanded, MaxDNSSearchListCharsExpanded
}
if len(dnsConfig.Searches) > maxDNSSearchPaths {
allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", maxDNSSearchPaths)))
if len(dnsConfig.Searches) > MaxDNSSearchPaths {
allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths)))
}
// Include the space between search paths.
if len(strings.Join(dnsConfig.Searches, " ")) > maxDNSSearchListChars {
allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", maxDNSSearchListChars)))
if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars {
allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v characters (including spaces) in the search list", MaxDNSSearchListChars)))
}
for i, search := range dnsConfig.Searches {
// it is fine to have a trailing dot
@ -3481,15 +3565,35 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic
return allErrs
}

func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList {
// validatePodHostNetworkDeps checks fields which depend on whether HostNetwork is
// true or not. It should be called on all PodSpecs, but opts can change what
// is enforced. E.g. opts.ResourceIsPod should only be set when called in the
// context of a Pod, and not on PodSpecs which are embedded in other resources
// (e.g. Deployments).
func validatePodHostNetworkDeps(spec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// For <reasons> we keep `.HostNetwork` in .SecurityContext on the internal
// version of Pod.
hostNetwork := false
if spec.SecurityContext != nil {
hostNetwork = spec.SecurityContext.HostNetwork
}

allErrors := field.ErrorList{}

if hostNetwork {
for i, container := range containers {
fldPath := fldPath.Child("containers")
for i, container := range spec.Containers {
portsPath := fldPath.Index(i).Child("ports")
for i, port := range container.Ports {
idxPath := portsPath.Index(i)
if port.HostPort != port.ContainerPort {
allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true"))
// At this point, we know that HostNetwork is true. If this
// PodSpec is in a Pod (opts.ResourceIsPod), then HostPort must
// be the same value as ContainerPort. If this PodSpec is in
// some other resource (e.g. Deployment) we allow 0 (i.e.
// unspecified) because it will be defaulted when the Pod is
// ultimately created, but we do not allow any other values.
if hp, cp := port.HostPort, port.ContainerPort; (opts.ResourceIsPod || hp != 0) && hp != cp {
allErrors = append(allErrors, field.Invalid(idxPath.Child("hostPort"), port.HostPort, "must match `containerPort` when `hostNetwork` is true"))
}
}
}
@ -3688,25 +3792,29 @@ type PodValidationOptions struct {
AllowInvalidLabelValueInSelector bool
// Allow pod spec to use non-integer multiple of huge page unit size
AllowIndivisibleHugePagesValues bool
// Allow more DNSSearchPaths and longer DNSSearchListChars
AllowExpandedDNSConfig bool
// Allow pod spec to use status.hostIPs in downward API if feature is enabled
AllowHostIPsField bool
// Allow invalid topologySpreadConstraint labelSelector for backward compatibility
AllowInvalidTopologySpreadConstraintLabelSelector bool
// Allow node selector additions for gated pods.
AllowMutableNodeSelectorAndNodeAffinity bool
// The top-level resource being validated is a Pod, not just a PodSpec
// embedded in some other resource.
ResourceIsPod bool
}
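
A standalone sketch of how the new ResourceIsPod option interacts with the hostPort rule shown earlier in validatePodHostNetworkDeps; the helper name is hypothetical and the real code reports field errors rather than returning a bool:

package main

import "fmt"

// hostPortValid applies the rule from the diff above for hostNetwork pods:
// a Pod must set hostPort equal to containerPort, while an embedded PodSpec
// (e.g. a Deployment template) may leave hostPort at 0 to be defaulted later.
func hostPortValid(resourceIsPod bool, hostPort, containerPort int32) bool {
	if resourceIsPod || hostPort != 0 {
		return hostPort == containerPort
	}
	return true
}

func main() {
	fmt.Println(hostPortValid(true, 0, 8080))     // false: a Pod must state the matching hostPort
	fmt.Println(hostPortValid(false, 0, 8080))    // true: template, defaulted when the Pod is created
	fmt.Println(hostPortValid(false, 9090, 8080)) // false: any non-zero hostPort must still match
}
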

// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
// and is called by ValidatePodCreate and ValidatePodUpdate.
func validatePodMetadataAndSpec(pod *core.Pod, opts PodValidationOptions) field.ErrorList {
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"), opts)...)
allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, field.NewPath("spec"), opts)...)
metaPath := field.NewPath("metadata")
specPath := field.NewPath("spec")

allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, metaPath)
allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, metaPath.Child("annotations"), opts)...)
allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, specPath, opts)...)

// we do additional validation only pertinent for pods and not pod templates
// this was done to preserve backwards compatibility
specPath := field.NewPath("spec")

if pod.Spec.ServiceAccountName == "" {
for vi, volume := range pod.Spec.Volumes {
@ -3774,6 +3882,58 @@ func validatePodIPs(pod *core.Pod) field.ErrorList {
return allErrs
}

// validateHostIPs validates IPs in pod status
func validateHostIPs(pod *core.Pod) field.ErrorList {
allErrs := field.ErrorList{}

if len(pod.Status.HostIPs) == 0 {
return allErrs
}

hostIPsField := field.NewPath("status", "hostIPs")

// hostIP must be equal to hostIPs[0].IP
if pod.Status.HostIP != pod.Status.HostIPs[0].IP {
allErrs = append(allErrs, field.Invalid(hostIPsField.Index(0).Child("ip"), pod.Status.HostIPs[0].IP, "must be equal to `hostIP`"))
}

// all HostIPs must be valid IPs
for i, hostIP := range pod.Status.HostIPs {
for _, msg := range validation.IsValidIP(hostIP.IP) {
allErrs = append(allErrs, field.Invalid(hostIPsField.Index(i), hostIP.IP, msg))
}
}

// if we have more than one Pod.HostIP then
// - validate for dual stack
// - validate for duplication
if len(pod.Status.HostIPs) > 1 {
seen := sets.String{}
hostIPs := make([]string, 0, len(pod.Status.HostIPs))

// There should be no duplicates in list of Pod.HostIPs
for i, hostIP := range pod.Status.HostIPs {
hostIPs = append(hostIPs, hostIP.IP)
if seen.Has(hostIP.IP) {
allErrs = append(allErrs, field.Duplicate(hostIPsField.Index(i), hostIP))
}
seen.Insert(hostIP.IP)
}

dualStack, err := netutils.IsDualStackIPStrings(hostIPs)
if err != nil {
allErrs = append(allErrs, field.InternalError(hostIPsField, fmt.Errorf("failed to check for dual stack with error:%v", err)))
}

// We only support one from each IP family (i.e. max two IPs in this list).
if !dualStack || len(hostIPs) > 2 {
allErrs = append(allErrs, field.Invalid(hostIPsField, pod.Status.HostIPs, "may specify no more than one IP for each IP family"))
}
}

return allErrs
}
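
For illustration only, a sketch of the dual-stack helper used above, against k8s.io/utils/net; the rule in validateHostIPs additionally requires hostIPs[0] to equal status.hostIP and rejects duplicates:

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// One IPv4 plus one IPv6 address is a valid dual-stack pair.
	ok, err := netutils.IsDualStackIPStrings([]string{"192.0.2.10", "2001:db8::10"})
	fmt.Println(ok, err) // true <nil>

	// Two addresses from the same family are not, so validation would reject them.
	ok, err = netutils.IsDualStackIPStrings([]string{"192.0.2.10", "192.0.2.11"})
	fmt.Println(ok, err) // false <nil>
}
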

// ValidatePodSpec tests that the specified PodSpec has valid data.
// This includes checking formatting and uniqueness. It also canonicalizes the
// structure by setting default values and implementing any backwards-compatibility
@ -3790,10 +3950,11 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...)
allErrs = append(allErrs, validatePodHostNetworkDeps(spec, fldPath, opts)...)
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...)
allErrs = append(allErrs, validatePodSpecSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...)
allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
allErrs = append(allErrs, validateAffinity(spec.Affinity, opts, fldPath.Child("affinity"))...)
allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
@ -4396,12 +4557,13 @@ func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList
return allErrs
}

// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data.
func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
// validatePodSpecSecurityContext verifies the SecurityContext of a PodSpec,
// whether that is defined in a Pod or in an embedded PodSpec (e.g. a
// Deployment's pod template).
func validatePodSpecSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}

if securityContext != nil {
allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...)
if securityContext.FSGroup != nil {
for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg))
@ -4802,11 +4964,16 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
// The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever.
allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...)
allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...)

if newIPErrs := validatePodIPs(newPod); len(newIPErrs) > 0 {
allErrs = append(allErrs, newIPErrs...)
}

if newIPErrs := validateHostIPs(newPod); len(newIPErrs) > 0 {
allErrs = append(allErrs, newIPErrs...)
}

return allErrs
}

@ -4823,6 +4990,42 @@ func validatePodConditions(conditions []core.PodCondition, fldPath *field.Path)
return allErrs
}

// validatePodResourceClaimStatuses validates the ResourceClaimStatuses slice in a pod status.
func validatePodResourceClaimStatuses(statuses []core.PodResourceClaimStatus, podClaims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList

claimNames := sets.New[string]()
for i, status := range statuses {
idxPath := fldPath.Index(i)
// There's no need to check the content of the name. If it matches an entry,
// then it is valid, otherwise we reject it here.
if !havePodClaim(podClaims, status.Name) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), status.Name, "must match the name of an entry in `spec.resourceClaims`"))
}
if claimNames.Has(status.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), status.Name))
} else {
claimNames.Insert(status.Name)
}
if status.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*status.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), status.ResourceClaimName, detail))
}
}
}

return allErrs
}

func havePodClaim(podClaims []core.PodResourceClaim, name string) bool {
for _, podClaim := range podClaims {
if podClaim.Name == name {
return true
}
}
return false
}

// ValidatePodEphemeralContainersUpdate tests that a user update to EphemeralContainers is valid.
// newPod and oldPod must only differ in their EphemeralContainers.
func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
@ -5245,14 +5448,14 @@ func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorL
// ValidateReplicationController tests if required fields in the replication controller are set.
func ValidateReplicationController(controller *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"), opts)...)
allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, nil, field.NewPath("spec"), opts)...)
return allErrs
}

// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set.
func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController, opts PodValidationOptions) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"), opts)...)
allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, &oldController.Spec, field.NewPath("spec"), opts)...)
return allErrs
}

@ -5297,7 +5500,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path
}

// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func ValidatePodTemplateSpecForRC(template, oldTemplate *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if template == nil {
allErrs = append(allErrs, field.Required(fldPath, ""))
@ -5311,8 +5514,13 @@ func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap ma
}
}
allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath, opts)...)
// get rid of apivalidation.ValidateReadOnlyPersistentDisks, stop passing oldTemplate to this function
var oldVols []core.Volume
if oldTemplate != nil {
oldVols = oldTemplate.Spec.Volumes // +k8s:verify-mutation:reason=clone
}
if replicas > 1 {
allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...)
allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, oldVols, fldPath.Child("spec", "volumes"))...)
}
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if template.Spec.RestartPolicy != core.RestartPolicyAlways {
@ -5326,12 +5534,17 @@ func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap ma
}

// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set.
func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func ValidateReplicationControllerSpec(spec, oldSpec *core.ReplicationControllerSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
// oldSpec is not empty, pass oldSpec.template.
var oldTemplate *core.PodTemplateSpec
if oldSpec != nil {
oldTemplate = oldSpec.Template // +k8s:verify-mutation:reason=clone
}
allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, oldTemplate, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
return allErrs
}

@ -5351,17 +5564,29 @@ func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path, op
return allErrs
}

func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList {
// ValidateReadOnlyPersistentDisks stick this AFTER the short-circuit checks
func ValidateReadOnlyPersistentDisks(volumes, oldVolumes []core.Volume, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for i := range volumes {
vol := &volumes[i]
idxPath := fldPath.Index(i)
if vol.GCEPersistentDisk != nil {
if !vol.GCEPersistentDisk.ReadOnly {
allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
}

if utilfeature.DefaultFeatureGate.Enabled(features.SkipReadOnlyValidationGCE) {
return field.ErrorList{}
}

isWriteablePD := func(vol *core.Volume) bool {
return vol.GCEPersistentDisk != nil && !vol.GCEPersistentDisk.ReadOnly
}

for i := range oldVolumes {
if isWriteablePD(&oldVolumes[i]) {
return field.ErrorList{}
}
}

for i := range volumes {
idxPath := fldPath.Index(i)
if isWriteablePD(&volumes[i]) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
}
// TODO: What to do for AWS? It doesn't support replicas
}
return allErrs
}
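
A hedged summary of the new short-circuits above, reduced to a plain boolean sketch (the hypothetical helper stands in for the feature-gate lookup and the oldVolumes scan; the real function returns field errors):

package main

import "fmt"

// readOnlyCheckApplies reflects the two early returns above: the writable GCE PD
// check is skipped when the SkipReadOnlyValidationGCE gate is enabled, or when
// the old template already carried a writable PD, so existing objects keep validating.
func readOnlyCheckApplies(gateEnabled, oldHasWritablePD bool) bool {
	if gateEnabled || oldHasWritablePD {
		return false
	}
	return true
}

func main() {
	fmt.Println(readOnlyCheckApplies(false, false)) // true: a new writable PD is still rejected for replicas > 1
	fmt.Println(readOnlyCheckApplies(false, true))  // false: ratcheting, the pre-existing writable PD is tolerated
	fmt.Println(readOnlyCheckApplies(true, false))  // false: feature gate disables the check entirely
}
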
75
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
75
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
@ -793,6 +793,11 @@ func (in *Container) DeepCopyInto(out *Container) {
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.RestartPolicy != nil {
in, out := &in.RestartPolicy, &out.RestartPolicy
*out = new(ContainerRestartPolicy)
**out = **in
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -1420,6 +1425,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.RestartPolicy != nil {
in, out := &in.RestartPolicy, &out.RestartPolicy
*out = new(ContainerRestartPolicy)
**out = **in
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -1871,6 +1881,22 @@ func (in *HostAlias) DeepCopy() *HostAlias {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostIP) DeepCopyInto(out *HostIP) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP.
func (in *HostIP) DeepCopy() *HostIP {
if in == nil {
return nil
}
out := new(HostIP)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
*out = *in
@ -2897,7 +2923,7 @@ func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
in.Status.DeepCopyInto(&out.Status)
return
}

@ -3074,10 +3100,12 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt
(*out)[key] = val.DeepCopy()
}
}
if in.ResizeStatus != nil {
in, out := &in.ResizeStatus, &out.ResizeStatus
*out = new(PersistentVolumeClaimResizeStatus)
**out = **in
if in.AllocatedResourceStatuses != nil {
in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses
*out = make(map[ResourceName]ClaimResourceStatus, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
@ -3337,6 +3365,10 @@ func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
*out = *in
if in.LastPhaseTransitionTime != nil {
in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime
*out = (*in).DeepCopy()
}
return
}

@ -3809,6 +3841,27 @@ func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) {
*out = *in
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus.
func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus {
if in == nil {
return nil
}
out := new(PodResourceClaimStatus)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
*out = *in
@ -4093,6 +4146,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.HostIPs != nil {
in, out := &in.HostIPs, &out.HostIPs
*out = make([]HostIP, len(*in))
copy(*out, *in)
}
if in.PodIPs != nil {
in, out := &in.PodIPs, &out.PodIPs
*out = make([]PodIP, len(*in))
@ -4123,6 +4181,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ResourceClaimStatuses != nil {
in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses
*out = make([]PodResourceClaimStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
2
vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/apis/extensions/OWNERS
generated
vendored
@ -2,7 +2,6 @@

reviewers:
- thockin
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
@ -19,5 +18,6 @@ reviewers:
- mwielgus
- soltysh
- dims
- jpbetz
labels:
- sig/apps
41
vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
generated
vendored
41
vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
generated
vendored
@ -35,11 +35,6 @@ type NetworkPolicy struct {
// spec represents the specification of the desired behavior for this NetworkPolicy.
// +optional
Spec NetworkPolicySpec

// status represents the current state of the NetworkPolicy.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status NetworkPolicyStatus
}

// PolicyType describes the NetworkPolicy type
@ -201,42 +196,6 @@ type NetworkPolicyPeer struct {
IPBlock *IPBlock
}

// NetworkPolicyConditionType is the type for status conditions on
// a NetworkPolicy. This type should be used with the
// NetworkPolicyStatus.Conditions field.
type NetworkPolicyConditionType string

const (
// NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by
// the Network Policy provider and will be implemented in the cluster
NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted"

// NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially
// parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some
// other condition
NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure"

// NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the
// Network Policy provider and will not be implemented in the cluster
NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure"
)

// NetworkPolicyConditionReason defines the set of reasons that explain why a
// particular NetworkPolicy condition type has been raised.
type NetworkPolicyConditionReason string

const (
// NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been
// implemented in the cluster due to a lack of some feature not supported by the Network Policy provider
NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported"
)

// NetworkPolicyStatus describes the current state of the NetworkPolicy.
type NetworkPolicyStatus struct {
// conditions holds an array of metav1.Condition that describes the state of the NetworkPolicy.
Conditions []metav1.Condition
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NetworkPolicyList is a list of NetworkPolicy objects.
24
vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go
generated
vendored
24
vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go
generated
vendored
@ -661,7 +661,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}

@ -874,29 +873,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus.
func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus {
if in == nil {
return nil
}
out := new(NetworkPolicyStatus)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParentReference) DeepCopyInto(out *ParentReference) {
*out = *in