rebase: update kubernetes to v1.23.0

Update the Go dependency to the latest released Kubernetes version, v1.23.0.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: 42403e2ba7
Commit: 5762da3e91
vendor/k8s.io/kubernetes/pkg/apis/apps/types.go (generated, vendored): 46 lines changed

@@ -97,6 +97,40 @@ type RollingUpdateStatefulSetStrategy struct {
 	Partition int32
 }
 
+// PersistentVolumeClaimRetentionPolicyType is a string enumeration of the policies that will determine
+// when volumes from the VolumeClaimTemplates will be deleted when the controlling StatefulSet is
+// deleted or scaled down.
+type PersistentVolumeClaimRetentionPolicyType string
+
+const (
+	// RetainPersistentVolumeClaimRetentionPolicyType is the default
+	// PersistentVolumeClaimRetentionPolicy and specifies that
+	// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
+	// will not be deleted.
+	RetainPersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Retain"
+	// DeletePersistentVolumeClaimRetentionPolicyType specifies that
+	// PersistentVolumeClaims associated with StatefulSet VolumeClaimTemplates
+	// will be deleted in the scenario specified in
+	// StatefulSetPersistentVolumeClaimPolicy.
+	DeletePersistentVolumeClaimRetentionPolicyType PersistentVolumeClaimRetentionPolicyType = "Delete"
+)
+
+// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
+// created from the StatefulSet VolumeClaimTemplates.
+type StatefulSetPersistentVolumeClaimRetentionPolicy struct {
+	// WhenDeleted specifies what happens to PVCs created from StatefulSet
+	// VolumeClaimTemplates when the StatefulSet is deleted. The default policy
+	// of `Retain` causes PVCs to not be affected by StatefulSet deletion. The
+	// `Delete` policy causes those PVCs to be deleted.
+	WhenDeleted PersistentVolumeClaimRetentionPolicyType
+	// WhenScaled specifies what happens to PVCs created from StatefulSet
+	// VolumeClaimTemplates when the StatefulSet is scaled down. The default
+	// policy of `Retain` causes PVCs to not be affected by a scaledown. The
+	// `Delete` policy causes the associated PVCs for any excess pods above
+	// the replica count to be deleted.
+	WhenScaled PersistentVolumeClaimRetentionPolicyType
+}
+
 // A StatefulSetSpec is the specification of a StatefulSet.
 type StatefulSetSpec struct {
 	// Replicas is the desired number of replicas of the given Template.
@@ -164,6 +198,12 @@ type StatefulSetSpec struct {
 	// This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
 	// +optional
 	MinReadySeconds int32
+
+	// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
+	// the StatefulSet VolumeClaimTemplates. This requires the
+	// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
+	// +optional
+	PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy
 }
 
 // StatefulSetStatus represents the current state of a StatefulSet.
@@ -205,9 +245,7 @@ type StatefulSetStatus struct {
 	Conditions []StatefulSetCondition
 
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
-	// This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
-	// Remove omitempty when graduating to beta
-	// +optional
+	// This is a beta field and requires enabling StatefulSetMinReadySeconds feature gate.
 	AvailableReplicas int32
 }
 
@@ -572,7 +610,7 @@ type RollingUpdateDaemonSet struct {
 	// pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
 	// on that node is marked deleted. If the old pod becomes unavailable for any
 	// reason (Ready transitions to false, is evicted, or is drained) an updated
-	// pod is immediatedly created on that node without considering surge limits.
+	// pod is immediately created on that node without considering surge limits.
 	// Allowing surge implies the possibility that the resources consumed by the
 	// daemonset on any given node can double if the readiness check fails, and
 	// so resource intensive daemonsets should take into account that they may
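For context, this new policy is what lets a StatefulSet clean up the PVCs created from its volumeClaimTemplates. A minimal sketch of setting it from a client, assuming the matching fields in the public k8s.io/api/apps/v1 package and a cluster with the alpha StatefulSetAutoDeletePVC feature gate enabled:

	package main

	import (
		"fmt"

		appsv1 "k8s.io/api/apps/v1"
	)

	func main() {
		// Delete PVCs when the StatefulSet is deleted, but retain them
		// when it is merely scaled down.
		policy := appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		}
		sts := appsv1.StatefulSet{
			Spec: appsv1.StatefulSetSpec{
				PersistentVolumeClaimRetentionPolicy: &policy,
			},
		}
		fmt.Println(sts.Spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted)
	}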
vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go (generated, vendored): 22 lines changed

@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
@@ -711,6 +712,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetPersistentVolumeClaimRetentionPolicy.
+func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopy() *StatefulSetPersistentVolumeClaimRetentionPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetPersistentVolumeClaimRetentionPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
 	*out = *in
@@ -733,6 +750,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.PersistentVolumeClaimRetentionPolicy != nil {
+		in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
+		*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
+		**out = **in
+	}
 	return
 }
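The generated helpers matter because objects returned from shared informer caches must never be mutated in place; DeepCopy hands the caller an independent value, re-allocating pointer fields such as the new retention policy. A small illustration of the aliasing problem this solves, using the public apps/v1 mirror of these types (hypothetical helper name):

	import appsv1 "k8s.io/api/apps/v1"

	// tweak returns a modified copy without writing through to the
	// cached object that in may point into.
	func tweak(in *appsv1.StatefulSet) *appsv1.StatefulSet {
		out := in.DeepCopy()
		if out.Spec.PersistentVolumeClaimRetentionPolicy != nil {
			out.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled =
				appsv1.DeletePersistentVolumeClaimRetentionPolicyType
		}
		return out
	}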
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go (generated, vendored): 1 line changed

@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
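This one-line change repeats across all the regenerated files: Go 1.17 introduced the //go:build constraint syntax, and gofmt now inserts it above the legacy // +build line, keeping the two in agreement. The shared header reads:

	//go:build !ignore_autogenerated
	// +build !ignore_autogenerated

	// Both lines exclude the file from builds that set the tag, e.g.:
	//   go build -tags ignore_autogenerated ./...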
vendor/k8s.io/kubernetes/pkg/apis/batch/types.go (generated, vendored): 16 lines changed

@@ -173,8 +173,6 @@ type JobSpec struct {
 	// guarantees (e.g. finalizers) will be honored. If this field is unset,
 	// the Job won't be automatically deleted. If this field is set to zero,
 	// the Job becomes eligible to be deleted immediately after it finishes.
-	// This field is alpha-level and is only honored by servers that enable the
-	// TTLAfterFinished feature.
 	// +optional
 	TTLSecondsAfterFinished *int32
 
@@ -243,10 +241,17 @@ type JobStatus struct {
 	// +optional
 	CompletionTime *metav1.Time
 
-	// The number of actively running pods.
+	// The number of pending and running pods.
 	// +optional
 	Active int32
 
+	// The number of active pods which have a Ready condition.
+	//
+	// This field is alpha-level. The job controller populates the field when
+	// the feature gate JobReadyPods is enabled (disabled by default).
+	// +optional
+	Ready *int32
+
 	// The number of pods which reached phase Succeeded.
 	// +optional
 	Succeeded int32
@@ -276,8 +281,9 @@ type JobStatus struct {
 	// (3) Remove the pod UID from the array while increasing the corresponding
 	// counter.
 	//
-	// This field is alpha-level. The job controller only makes use of this field
-	// when the feature gate PodTrackingWithFinalizers is enabled.
+	// This field is beta-level. The job controller only makes use of this field
+	// when the feature gate JobTrackingWithFinalizers is enabled (enabled
+	// by default).
 	// Old jobs might not be tracked using this field, in which case the field
 	// remains null.
 	// +optional
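Because Ready is a *int32 that the controller only populates behind the JobReadyPods gate, consumers have to treat nil as "not tracked" rather than zero. A minimal sketch, assuming the mirrored field in the public k8s.io/api/batch/v1 package:

	import (
		"fmt"

		batchv1 "k8s.io/api/batch/v1"
	)

	// readyPods reports the Ready count and whether the controller
	// actually populated it (it stays nil when JobReadyPods is off).
	func readyPods(job *batchv1.Job) (int32, bool) {
		if job.Status.Ready == nil {
			return 0, false
		}
		return *job.Status.Ready, true
	}

	func describe(job *batchv1.Job) {
		if n, ok := readyPods(job); ok {
			fmt.Printf("%d pods ready\n", n)
		} else {
			fmt.Println("ready count not tracked on this cluster")
		}
	}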
vendor/k8s.io/kubernetes/pkg/apis/batch/zz_generated.deepcopy.go (generated, vendored): 6 lines changed

@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
@@ -313,6 +314,11 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
 		in, out := &in.CompletionTime, &out.CompletionTime
 		*out = (*in).DeepCopy()
 	}
+	if in.Ready != nil {
+		in, out := &in.Ready, &out.Ready
+		*out = new(int32)
+		**out = **in
+	}
 	if in.UncountedTerminatedPods != nil {
 		in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods
 		*out = new(UncountedTerminatedPods)
vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go (generated, vendored): 2 lines changed

@@ -99,7 +99,7 @@ const (
 	EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
 
 	// EndpointsOverCapacity will be set on an Endpoints resource when it
-	// exceeds the maximum capacity of 1000 addresses. Inititially the Endpoints
+	// exceeds the maximum capacity of 1000 addresses. Initially the Endpoints
 	// controller will set this annotation with a value of "warning". In a
 	// future release, the controller may set this annotation with a value of
 	// "truncated" to indicate that any addresses exceeding the limit of 1000
vendor/k8s.io/kubernetes/pkg/apis/core/types.go (generated, vendored): 221 lines changed

@@ -111,7 +111,7 @@ type VolumeSource struct {
 	// +optional
 	FlexVolume *FlexVolumeSource
 
-	// Cinder represents a cinder volume attached and mounted on kubelets host machine.
+	// Cinder represents a cinder volume attached and mounted on kubelet's host machine.
 	// +optional
 	Cinder *CinderVolumeSource
 
@@ -135,17 +135,17 @@ type VolumeSource struct {
 	// ConfigMap represents a configMap that should populate this volume
 	// +optional
 	ConfigMap *ConfigMapVolumeSource
-	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+	// VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine
 	// +optional
 	VsphereVolume *VsphereVirtualDiskVolumeSource
 	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
 	// +optional
 	AzureDisk *AzureDiskVolumeSource
-	// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine
+	// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelet's host machine
 	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource
 	// Items for all in one resources secrets, configmaps, and downward API
 	Projected *ProjectedVolumeSource
-	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
+	// PortworxVolume represents a portworx volume attached and mounted on kubelet's host machine
 	// +optional
 	PortworxVolume *PortworxVolumeSource
 	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
@@ -182,9 +182,6 @@ type VolumeSource struct {
 	// A pod can use both types of ephemeral volumes and
 	// persistent volumes at the same time.
 	//
-	// This is a beta feature and only available when the GenericEphemeralVolume
-	// feature gate is enabled.
-	//
 	// +optional
 	Ephemeral *EphemeralVolumeSource
 }
@@ -226,7 +223,7 @@ type PersistentVolumeSource struct {
 	// provisioned/attached using an exec based plugin.
 	// +optional
 	FlexVolume *FlexPersistentVolumeSource
-	// Cinder represents a cinder volume attached and mounted on kubelets host machine.
+	// Cinder represents a cinder volume attached and mounted on kubelet's host machine.
 	// +optional
 	Cinder *CinderPersistentVolumeSource
 	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
@@ -241,15 +238,15 @@ type PersistentVolumeSource struct {
 	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
 	// +optional
 	AzureFile *AzureFilePersistentVolumeSource
-	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+	// VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine
 	// +optional
 	VsphereVolume *VsphereVirtualDiskVolumeSource
 	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
 	// +optional
 	AzureDisk *AzureDiskVolumeSource
-	// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine
+	// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelet's host machine
 	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource
-	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
+	// PortworxVolume represents a portworx volume attached and mounted on kubelet's host machine
 	// +optional
 	PortworxVolume *PortworxVolumeSource
 	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
@@ -433,6 +430,9 @@ type PersistentVolumeClaimSpec struct {
 	// +optional
 	Selector *metav1.LabelSelector
 	// Resources represents the minimum resources required
+	// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+	// that are lower than previous value but must still be higher than capacity recorded in the
+	// status field of the claim.
 	// +optional
 	Resources ResourceRequirements
 	// VolumeName is the binding reference to the PersistentVolume backing this
@@ -489,6 +489,26 @@ const (
 	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
 )
 
+// +enum
+type PersistentVolumeClaimResizeStatus string
+
+const (
+	// When expansion is complete, the empty string is set by resize controller or kubelet.
+	PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = ""
+	// State set when resize controller starts expanding the volume in control-plane
+	PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress"
+	// State set when expansion has failed in resize controller with a terminal error.
+	// Transient errors such as timeout should not set this status and should leave ResizeStatus
+	// unmodified, so as resize controller can resume the volume expansion.
+	PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed"
+	// State set when resize controller has finished expanding the volume but further expansion is needed on the node.
+	PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending"
+	// State set when kubelet starts expanding the volume.
+	PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress"
+	// State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed.
+	PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed"
+)
+
 // PersistentVolumeClaimCondition represents the current condition of PV claim
 type PersistentVolumeClaimCondition struct {
 	Type PersistentVolumeClaimConditionType
@@ -516,6 +536,24 @@ type PersistentVolumeClaimStatus struct {
 	Capacity ResourceList
 	// +optional
 	Conditions []PersistentVolumeClaimCondition
+	// The storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may
+	// be larger than the actual capacity when a volume expansion operation is requested.
+	// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
+	// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
+	// If a volume expansion capacity request is lowered, allocatedResources is only
+	// lowered if there are no expansion operations in progress and if the actual volume capacity
+	// is equal or lower than the requested capacity.
+	// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+	// +featureGate=RecoverVolumeExpansionFailure
+	// +optional
+	AllocatedResources ResourceList
+	// ResizeStatus stores status of resize operation.
+	// ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty
+	// string by resize controller or kubelet.
+	// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+	// +featureGate=RecoverVolumeExpansionFailure
+	// +optional
+	ResizeStatus *PersistentVolumeClaimResizeStatus
 }
 
 // PersistentVolumeAccessMode defines various access modes for PV.
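Taken together, AllocatedResources and ResizeStatus let a CSI driver or operator see where a stalled expansion is stuck. A minimal read-side sketch, assuming the mirrored fields in the public k8s.io/api/core/v1 package on a cluster with the alpha RecoverVolumeExpansionFailure gate enabled:

	import (
		"fmt"

		corev1 "k8s.io/api/core/v1"
	)

	// expansionState summarizes an in-flight PVC expansion.
	func expansionState(pvc *corev1.PersistentVolumeClaim) string {
		rs := pvc.Status.ResizeStatus
		if rs == nil || *rs == corev1.PersistentVolumeClaimNoExpansionInProgress {
			return "no expansion in progress"
		}
		// AllocatedResources records what the control plane has already
		// granted, which may exceed Status.Capacity mid-expansion.
		alloc := pvc.Status.AllocatedResources[corev1.ResourceStorage]
		return fmt.Sprintf("expansion %s, allocated %s", *rs, alloc.String())
	}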
@@ -1620,7 +1658,7 @@ type LocalVolumeSource struct {
 	// Filesystem type to mount.
 	// It applies only when the Path is a block device.
 	// Must be a filesystem type supported by the host operating system.
-	// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified.
+	// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
 	// +optional
 	FSType *string
 }
@@ -2025,7 +2063,7 @@ type ExecAction struct {
 // alive or ready to receive traffic.
 type Probe struct {
 	// The action taken to determine the health of a container
-	Handler
+	ProbeHandler
 	// Length of time before health checking is activated. In seconds.
 	// +optional
 	InitialDelaySeconds int32
@@ -2191,10 +2229,9 @@ type Container struct {
 	TTY bool
 }
 
-// Handler defines a specific action that should be taken
-// TODO: pass structured data to these actions, and document that data here.
-type Handler struct {
-	// One and only one of the following should be specified.
+// ProbeHandler defines a specific action that should be taken in a probe.
+// One and only one of the fields must be specified.
+type ProbeHandler struct {
 	// Exec specifies the action to take.
 	// +optional
 	Exec *ExecAction
@@ -2202,9 +2239,43 @@ type Handler struct {
 	// +optional
 	HTTPGet *HTTPGetAction
 	// TCPSocket specifies an action involving a TCP port.
-	// TODO: implement a realistic TCP lifecycle hook
 	// +optional
 	TCPSocket *TCPSocketAction
+
+	// GRPC specifies an action involving a GRPC port.
+	// This is an alpha field and requires enabling GRPCContainerProbe feature gate.
+	// +featureGate=GRPCContainerProbe
+	// +optional
+	GRPC *GRPCAction
+}
+
+// LifecycleHandler defines a specific action that should be taken in a lifecycle
+// hook. One and only one of the fields, except TCPSocket must be specified.
+type LifecycleHandler struct {
+	// Exec specifies the action to take.
+	// +optional
+	Exec *ExecAction
+	// HTTPGet specifies the http request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction
+	// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+	// for the backward compatibility. There are no validation of this field and
+	// lifecycle hooks will fail in runtime when tcp handler is specified.
+	// +optional
+	TCPSocket *TCPSocketAction
+}
+
+type GRPCAction struct {
+	// Port number of the gRPC service.
+	// Note: Number must be in the range 1 to 65535.
+	Port int32
+
+	// Service is the name of the service to place in the gRPC HealthCheckRequest
+	// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+	//
+	// If this is not specified, the default behavior is to probe the server's overall health status.
+	// +optional
+	Service *string
+}
 
 // Lifecycle describes actions that the management system should take in response to container lifecycle
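The new GRPCAction gives probes a first-class gRPC health check instead of exec'ing a grpc_health_probe binary. A minimal sketch of wiring one into a container, assuming the mirrored types in the public k8s.io/api/core/v1 package and the alpha GRPCContainerProbe feature gate:

	import corev1 "k8s.io/api/core/v1"

	func grpcReadiness() *corev1.Probe {
		service := "" // empty string probes the server's overall health
		return &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				GRPC: &corev1.GRPCAction{
					Port:    9090, // must be in the range 1 to 65535
					Service: &service,
				},
			},
			InitialDelaySeconds: 5,
			PeriodSeconds:       10,
		}
	}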
@@ -2213,19 +2284,20 @@ type Handler struct {
 type Lifecycle struct {
 	// PostStart is called immediately after a container is created. If the handler fails, the container
 	// is terminated and restarted.
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
-	PostStart *Handler
+	PostStart *LifecycleHandler
 	// PreStop is called immediately before a container is terminated due to an
 	// API request or management event such as liveness/startup probe failure,
 	// preemption, resource contention, etc. The handler is not called if the
-	// container crashes or exits. The reason for termination is passed to the
-	// handler. The Pod's termination grace period countdown begins before the
-	// PreStop hooked is executed. Regardless of the outcome of the handler, the
+	// container crashes or exits. The Pod's termination grace period countdown begins before the
+	// PreStop hook is executed. Regardless of the outcome of the handler, the
 	// container will eventually terminate within the Pod's termination grace
-	// period. Other management of the container blocks until the hook completes
+	// period (unless delayed by finalizers). Other management of the container blocks until the hook completes
 	// or until the termination grace period is reached.
 	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
 	// +optional
-	PreStop *Handler
+	PreStop *LifecycleHandler
 }
 
 // The below types are used by kube_client and api_server.
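The split means lifecycle hooks and probes no longer share one Handler type: TCPSocket survives in LifecycleHandler only as a deprecated compatibility field, and GRPC exists only on the probe side. A sketch of a preStop hook under the new type, assuming the mirrored types in k8s.io/api/core/v1:

	import corev1 "k8s.io/api/core/v1"

	func drainBeforeStop() *corev1.Lifecycle {
		return &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				// Exec and HTTPGet are the supported actions;
				// TCPSocket is kept only for backward compatibility.
				Exec: &corev1.ExecAction{
					Command: []string{"/bin/sh", "-c", "sleep 10"},
				},
			},
		}
	}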
@@ -2749,7 +2821,7 @@ type PodSpec struct {
 	// pod to perform user-initiated actions such as debugging. This list cannot be specified when
 	// creating a pod, and it cannot be modified by updating the pod spec. In order to add an
 	// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
-	// This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
+	// This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
 	// +optional
 	EphemeralContainers []EphemeralContainer
 	// +optional
@@ -2887,6 +2959,54 @@ type PodSpec struct {
 	// All topologySpreadConstraints are ANDed.
 	// +optional
 	TopologySpreadConstraints []TopologySpreadConstraint
+	// Specifies the OS of the containers in the pod.
+	// Some pod and container fields are restricted if this is set.
+	//
+	// If the OS field is set to linux, the following fields must be unset:
+	// - securityContext.windowsOptions
+	//
+	// If the OS field is set to windows, following fields must be unset:
+	// - spec.hostPID
+	// - spec.hostIPC
+	// - spec.securityContext.seLinuxOptions
+	// - spec.securityContext.seccompProfile
+	// - spec.securityContext.fsGroup
+	// - spec.securityContext.fsGroupChangePolicy
+	// - spec.securityContext.sysctls
+	// - spec.shareProcessNamespace
+	// - spec.securityContext.runAsUser
+	// - spec.securityContext.runAsGroup
+	// - spec.securityContext.supplementalGroups
+	// - spec.containers[*].securityContext.seLinuxOptions
+	// - spec.containers[*].securityContext.seccompProfile
+	// - spec.containers[*].securityContext.capabilities
+	// - spec.containers[*].securityContext.readOnlyRootFilesystem
+	// - spec.containers[*].securityContext.privileged
+	// - spec.containers[*].securityContext.allowPrivilegeEscalation
+	// - spec.containers[*].securityContext.procMount
+	// - spec.containers[*].securityContext.runAsUser
+	// - spec.containers[*].securityContext.runAsGroup
+	// +optional
+	// This is an alpha field and requires the IdentifyPodOS feature
+	OS *PodOS
 }
 
+// OSName is the set of OS'es that can be used in OS.
+type OSName string
+
+// These are valid values for OSName
+const (
+	Linux   OSName = "linux"
+	Windows OSName = "windows"
+)
+
+// PodOS defines the OS parameters of a pod.
+type PodOS struct {
+	// Name is the name of the operating system. The currently supported values are linux and windows.
+	// Additional value may be defined in future and can be one of:
+	// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+	// Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+	Name OSName
+}
+
 // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
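Declaring the pod OS up front lets validation reject fields that make no sense for that platform (the long list above). A sketch of opting in, assuming the mirrored types in k8s.io/api/core/v1, the alpha IdentifyPodOS feature gate, and a hypothetical Windows image:

	import corev1 "k8s.io/api/core/v1"

	func windowsPodSpec() corev1.PodSpec {
		return corev1.PodSpec{
			OS: &corev1.PodOS{Name: corev1.Windows},
			// With OS set to windows, linux-only fields such as
			// SecurityContext.SELinuxOptions must stay unset.
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "mcr.microsoft.com/windows/nanoserver:ltsc2022",
			}},
		}
	}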
@@ -2932,11 +3052,13 @@ type PodSecurityContext struct {
 	HostNetwork bool
 	// Use the host's pid namespace.
 	// Optional: Default to false.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +k8s:conversion-gen=false
 	// +optional
 	HostPID bool
 	// Use the host's ipc namespace.
 	// Optional: Default to false.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +k8s:conversion-gen=false
 	// +optional
 	HostIPC bool
@@ -2944,6 +3066,7 @@ type PodSecurityContext struct {
 	// When this is set containers will be able to view and signal processes from other containers
 	// in the same pod, and the first process in each container will not be assigned PID 1.
 	// HostPID and ShareProcessNamespace cannot both be set.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// Optional: Default to false.
 	// +k8s:conversion-gen=false
 	// +optional
@@ -2953,11 +3076,13 @@ type PodSecurityContext struct {
 	// container. May also be set in SecurityContext. If set in
 	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
 	// takes precedence for that container.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	SELinuxOptions *SELinuxOptions
 	// The Windows specific settings applied to all containers.
 	// If unspecified, the options within a container's SecurityContext will be used.
 	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is linux.
 	// +optional
 	WindowsOptions *WindowsSecurityContextOptions
 	// The UID to run the entrypoint of the container process.
@@ -2965,6 +3090,7 @@ type PodSecurityContext struct {
 	// May also be set in SecurityContext. If set in both SecurityContext and
 	// PodSecurityContext, the value specified in SecurityContext takes precedence
 	// for that container.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	RunAsUser *int64
 	// The GID to run the entrypoint of the container process.
@@ -2972,6 +3098,7 @@ type PodSecurityContext struct {
 	// May also be set in SecurityContext. If set in both SecurityContext and
 	// PodSecurityContext, the value specified in SecurityContext takes precedence
 	// for that container.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	RunAsGroup *int64
 	// Indicates that the container must run as a non-root user.
@@ -2986,6 +3113,7 @@ type PodSecurityContext struct {
 	// A list of groups applied to the first process run in each container, in addition
 	// to the container's primary GID. If unspecified, no groups will be added to
 	// any container.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	SupplementalGroups []int64
 	// A special supplemental group that applies to all containers in a pod.
@@ -2997,6 +3125,7 @@ type PodSecurityContext struct {
 	// 3. The permission bits are OR'd with rw-rw----
 	//
 	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	FSGroup *int64
 	// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
@@ -3005,13 +3134,16 @@ type PodSecurityContext struct {
 	// It will have no effect on ephemeral volume types such as: secret, configmaps
 	// and emptydir.
 	// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	FSGroupChangePolicy *PodFSGroupChangePolicy
 	// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
 	// sysctls (by the container runtime) might fail to launch.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	Sysctls []Sysctl
 	// The seccomp options to use by the containers in this pod.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	SeccompProfile *SeccompProfile
 }
@@ -3137,6 +3269,7 @@ type EphemeralContainerCommon struct {
 	// already allocated to the pod.
 	// +optional
 	Resources ResourceRequirements
+	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
 	// +optional
 	VolumeMounts []VolumeMount
 	// volumeDevices is the list of block devices to be used by the container.
@@ -3180,15 +3313,16 @@ type EphemeralContainerCommon struct {
 // these two types.
 var _ = Container(EphemeralContainerCommon{})
 
-// An EphemeralContainer is a temporary container that may be added to an existing pod for
+// An EphemeralContainer is a temporary container that you may add to an existing Pod for
 // user-initiated activities such as debugging. Ephemeral containers have no resource or
-// scheduling guarantees, and they will not be restarted when they exit or when a pod is
-// removed or restarted. If an ephemeral container causes a pod to exceed its resource
-// allocation, the pod may be evicted.
-// Ephemeral containers may not be added by directly updating the pod spec. They must be added
-// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec
-// once added.
-// This is an alpha feature enabled by the EphemeralContainers feature flag.
+// scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+// Pod to exceed its resource allocation.
+//
+// To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+// Pod. Ephemeral containers may not be removed or restarted.
+//
+// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.
 type EphemeralContainer struct {
 	// Ephemeral containers have all of the fields of Container, plus additional fields
 	// specific to ephemeral containers. Fields in common with Container are in the
@@ -3198,8 +3332,10 @@ type EphemeralContainer struct {
 
 	// If set, the name of the container from PodSpec that this ephemeral container targets.
 	// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
-	// If not set then the ephemeral container is run in whatever namespaces are shared
-	// for the pod. Note that the container runtime must support this feature.
+	// If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+	//
+	// The container runtime must implement support for this feature. If the runtime does not
+	// support namespace targeting then the result of setting this field is undefined.
 	// +optional
 	TargetContainerName string
 }
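Since ephemeral containers can only enter a pod through the ephemeralcontainers subresource, a client updates the pod there rather than editing spec.containers directly. A sketch with client-go, assuming the v1.23-era UpdateEphemeralContainers signature on the pods client and a hypothetical target container named "app":

	import (
		"context"

		corev1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	func addDebugger(ctx context.Context, cs kubernetes.Interface, ns string, pod *corev1.Pod) error {
		pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers,
			corev1.EphemeralContainer{
				EphemeralContainerCommon: corev1.EphemeralContainerCommon{
					Name:  "debugger",
					Image: "busybox:1.35",
					Stdin: true,
					TTY:   true,
				},
				// Share the namespaces of this container, if the runtime
				// supports namespace targeting.
				TargetContainerName: "app",
			})
		// The update goes to the ephemeralcontainers subresource; a plain
		// pod update would be rejected.
		_, err := cs.CoreV1().Pods(ns).UpdateEphemeralContainers(ctx, pod.Name, pod, metav1.UpdateOptions{})
		return err
	}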
@@ -3253,7 +3389,7 @@ type PodStatus struct {
 	ContainerStatuses []ContainerStatus
 
 	// Status for any ephemeral containers that have run in this pod.
-	// This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
+	// This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
 	// +optional
 	EphemeralContainerStatuses []ContainerStatus
 }
@@ -5044,6 +5180,7 @@ type Secret struct {
 	Data map[string][]byte `datapolicy:"password,security-key,token"`
 
 	// Used to facilitate programmatic handling of secret data.
+	// More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
 	// +optional
 	Type SecretType
 }
@@ -5284,34 +5421,40 @@ type ComponentStatusList struct {
 type SecurityContext struct {
 	// The capabilities to add/drop when running containers.
 	// Defaults to the default set of capabilities granted by the container runtime.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	Capabilities *Capabilities
 	// Run container in privileged mode.
 	// Processes in privileged containers are essentially equivalent to root on the host.
 	// Defaults to false.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	Privileged *bool
 	// The SELinux context to be applied to the container.
 	// If unspecified, the container runtime will allocate a random SELinux context for each
 	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
 	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	SELinuxOptions *SELinuxOptions
 	// The Windows specific settings applied to all containers.
 	// If unspecified, the options from the PodSecurityContext will be used.
 	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is linux.
 	// +optional
 	WindowsOptions *WindowsSecurityContextOptions
 	// The UID to run the entrypoint of the container process.
 	// Defaults to user specified in image metadata if unspecified.
 	// May also be set in PodSecurityContext. If set in both SecurityContext and
 	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	RunAsUser *int64
 	// The GID to run the entrypoint of the container process.
 	// Uses runtime default if unset.
 	// May also be set in PodSecurityContext. If set in both SecurityContext and
 	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	RunAsGroup *int64
 	// Indicates that the container must run as a non-root user.
@@ -5324,21 +5467,25 @@ type SecurityContext struct {
 	RunAsNonRoot *bool
 	// The read-only root filesystem allows you to restrict the locations that an application can write
 	// files to, ensuring the persistent data can only be written to mounts.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	ReadOnlyRootFilesystem *bool
 	// AllowPrivilegeEscalation controls whether a process can gain more
 	// privileges than its parent process. This bool directly controls if
 	// the no_new_privs flag will be set on the container process.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	AllowPrivilegeEscalation *bool
 	// ProcMount denotes the type of proc mount to use for the containers.
 	// The default is DefaultProcMount which uses the container runtime defaults for
 	// readonly paths and masked paths.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	ProcMount *ProcMountType
 	// The seccomp options to use by this container. If seccomp options are
 	// provided at both the pod & container level, the container options
 	// override the pod options.
+	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	SeccompProfile *SeccompProfile
 }
@@ -5484,7 +5631,7 @@ type TopologySpreadConstraint struct {
 	// but giving higher precedence to topologies that would help reduce the
 	// skew.
 	// A constraint is considered "Unsatisfiable" for an incoming pod
-	// if and only if every possible node assigment for that pod would violate
+	// if and only if every possible node assignment for that pod would violate
 	// "MaxSkew" on some topology.
 	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
 	// labelSelector spread as 3/1/1:
vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go (generated, vendored): 2 lines changed

@@ -17,7 +17,7 @@ limitations under the License.
 // +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core
 // +k8s:conversion-gen-external-types=k8s.io/api/core/v1
 // +k8s:defaulter-gen=TypeMeta
-// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/core/v1
+// +k8s:defaulter-gen-input=k8s.io/api/core/v1
 
 // Package v1 is the v1 version of the API.
 package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1"
185
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
generated
vendored
185
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
generated
vendored
@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
@ -641,6 +642,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.GRPCAction)(nil), (*core.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_GRPCAction_To_core_GRPCAction(a.(*v1.GRPCAction), b.(*core.GRPCAction), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*core.GRPCAction)(nil), (*v1.GRPCAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_core_GRPCAction_To_v1_GRPCAction(a.(*core.GRPCAction), b.(*v1.GRPCAction), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.GitRepoVolumeSource)(nil), (*core.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(a.(*v1.GitRepoVolumeSource), b.(*core.GitRepoVolumeSource), scope)
|
||||
}); err != nil {
|
||||
@ -691,16 +702,6 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.Handler)(nil), (*core.Handler)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_Handler_To_core_Handler(a.(*v1.Handler), b.(*core.Handler), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*core.Handler)(nil), (*v1.Handler)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_core_Handler_To_v1_Handler(a.(*core.Handler), b.(*v1.Handler), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.HostAlias)(nil), (*core.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_HostAlias_To_core_HostAlias(a.(*v1.HostAlias), b.(*core.HostAlias), scope)
|
||||
}); err != nil {
|
||||
@ -761,6 +762,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.LifecycleHandler)(nil), (*core.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_LifecycleHandler_To_core_LifecycleHandler(a.(*v1.LifecycleHandler), b.(*core.LifecycleHandler), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*core.LifecycleHandler)(nil), (*v1.LifecycleHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_core_LifecycleHandler_To_v1_LifecycleHandler(a.(*core.LifecycleHandler), b.(*v1.LifecycleHandler), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.LimitRange)(nil), (*core.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_LimitRange_To_core_LimitRange(a.(*v1.LimitRange), b.(*core.LimitRange), scope)
|
||||
}); err != nil {
|
||||
@ -1301,6 +1312,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.PodOS)(nil), (*core.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_PodOS_To_core_PodOS(a.(*v1.PodOS), b.(*core.PodOS), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*core.PodOS)(nil), (*v1.PodOS)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_core_PodOS_To_v1_PodOS(a.(*core.PodOS), b.(*v1.PodOS), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.PodPortForwardOptions)(nil), (*core.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(a.(*v1.PodPortForwardOptions), b.(*core.PodPortForwardOptions), scope)
|
||||
}); err != nil {
|
||||
@ -1441,6 +1462,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.ProbeHandler)(nil), (*core.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_ProbeHandler_To_core_ProbeHandler(a.(*v1.ProbeHandler), b.(*core.ProbeHandler), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*core.ProbeHandler)(nil), (*v1.ProbeHandler)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_core_ProbeHandler_To_v1_ProbeHandler(a.(*core.ProbeHandler), b.(*v1.ProbeHandler), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1.ProjectedVolumeSource)(nil), (*core.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(a.(*v1.ProjectedVolumeSource), b.(*core.ProjectedVolumeSource), scope)
|
||||
}); err != nil {
|
||||
@ -3806,6 +3837,28 @@ func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSou
|
||||
return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_GRPCAction_To_core_GRPCAction(in *v1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error {
|
||||
out.Port = in.Port
|
||||
out.Service = (*string)(unsafe.Pointer(in.Service))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_GRPCAction_To_core_GRPCAction is an autogenerated conversion function.
|
||||
func Convert_v1_GRPCAction_To_core_GRPCAction(in *v1.GRPCAction, out *core.GRPCAction, s conversion.Scope) error {
|
||||
return autoConvert_v1_GRPCAction_To_core_GRPCAction(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *v1.GRPCAction, s conversion.Scope) error {
|
||||
out.Port = in.Port
|
||||
out.Service = (*string)(unsafe.Pointer(in.Service))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_GRPCAction_To_v1_GRPCAction is an autogenerated conversion function.
|
||||
func Convert_core_GRPCAction_To_v1_GRPCAction(in *core.GRPCAction, out *v1.GRPCAction, s conversion.Scope) error {
|
||||
return autoConvert_core_GRPCAction_To_v1_GRPCAction(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error {
|
||||
out.Repository = in.Repository
|
||||
out.Revision = in.Revision
|
||||
@ -3930,30 +3983,6 @@ func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPH
|
||||
return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error {
|
||||
out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec))
|
||||
out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
|
||||
out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_Handler_To_core_Handler is an autogenerated conversion function.
|
||||
func Convert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error {
|
||||
return autoConvert_v1_Handler_To_core_Handler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error {
|
||||
out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec))
|
||||
out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
|
||||
out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_Handler_To_v1_Handler is an autogenerated conversion function.
|
||||
func Convert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error {
|
||||
return autoConvert_core_Handler_To_v1_Handler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error {
|
||||
out.IP = in.IP
|
||||
out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames))
|
||||
@ -4103,8 +4132,8 @@ func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPat
|
||||
}
|
||||
|
||||
func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error {
|
||||
out.PostStart = (*core.Handler)(unsafe.Pointer(in.PostStart))
|
||||
out.PreStop = (*core.Handler)(unsafe.Pointer(in.PreStop))
|
||||
out.PostStart = (*core.LifecycleHandler)(unsafe.Pointer(in.PostStart))
|
||||
out.PreStop = (*core.LifecycleHandler)(unsafe.Pointer(in.PreStop))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4114,8 +4143,8 @@ func Convert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycl
|
||||
}
|
||||
|
||||
func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error {
|
||||
out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart))
|
||||
out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop))
|
||||
out.PostStart = (*v1.LifecycleHandler)(unsafe.Pointer(in.PostStart))
|
||||
out.PreStop = (*v1.LifecycleHandler)(unsafe.Pointer(in.PreStop))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4124,6 +4153,30 @@ func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycl
|
||||
return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error {
|
||||
out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec))
|
||||
out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
|
||||
out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1_LifecycleHandler_To_core_LifecycleHandler is an autogenerated conversion function.
|
||||
func Convert_v1_LifecycleHandler_To_core_LifecycleHandler(in *v1.LifecycleHandler, out *core.LifecycleHandler, s conversion.Scope) error {
|
||||
return autoConvert_v1_LifecycleHandler_To_core_LifecycleHandler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *v1.LifecycleHandler, s conversion.Scope) error {
|
||||
out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec))
|
||||
out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
|
||||
out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_core_LifecycleHandler_To_v1_LifecycleHandler is an autogenerated conversion function.
|
||||
func Convert_core_LifecycleHandler_To_v1_LifecycleHandler(in *core.LifecycleHandler, out *v1.LifecycleHandler, s conversion.Scope) error {
|
||||
return autoConvert_core_LifecycleHandler_To_v1_LifecycleHandler(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
@ -5150,6 +5203,8 @@ func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimSta
|
||||
out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
|
||||
out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity))
|
||||
out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
|
||||
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
|
||||
out.ResizeStatus = (*core.PersistentVolumeClaimResizeStatus)(unsafe.Pointer(in.ResizeStatus))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5163,6 +5218,8 @@ func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimSta
|
||||
out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes))
|
||||
out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity))
|
||||
out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions))
|
||||
out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
|
||||
out.ResizeStatus = (*v1.PersistentVolumeClaimResizeStatus)(unsafe.Pointer(in.ResizeStatus))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -5894,6 +5951,26 @@ func Convert_url_Values_To_v1_PodLogOptions(in *url.Values, out *v1.PodLogOption
|
||||
return autoConvert_url_Values_To_v1_PodLogOptions(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1_PodOS_To_core_PodOS(in *v1.PodOS, out *core.PodOS, s conversion.Scope) error {
    out.Name = core.OSName(in.Name)
    return nil
}

// Convert_v1_PodOS_To_core_PodOS is an autogenerated conversion function.
func Convert_v1_PodOS_To_core_PodOS(in *v1.PodOS, out *core.PodOS, s conversion.Scope) error {
    return autoConvert_v1_PodOS_To_core_PodOS(in, out, s)
}

func autoConvert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *v1.PodOS, s conversion.Scope) error {
    out.Name = v1.OSName(in.Name)
    return nil
}

// Convert_core_PodOS_To_v1_PodOS is an autogenerated conversion function.
func Convert_core_PodOS_To_v1_PodOS(in *core.PodOS, out *v1.PodOS, s conversion.Scope) error {
    return autoConvert_core_PodOS_To_v1_PodOS(in, out, s)
}

func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error {
    out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports))
    return nil
@ -6106,6 +6183,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
    out.Overhead = *(*core.ResourceList)(unsafe.Pointer(&in.Overhead))
    out.TopologySpreadConstraints = *(*[]core.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
    out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
    out.OS = (*core.PodOS)(unsafe.Pointer(in.OS))
    return nil
}

@ -6158,6 +6236,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s
    out.Overhead = *(*v1.ResourceList)(unsafe.Pointer(&in.Overhead))
    out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
    out.TopologySpreadConstraints = *(*[]v1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
    out.OS = (*v1.PodOS)(unsafe.Pointer(in.OS))
    return nil
}

@ -6429,7 +6508,7 @@ func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core
}

func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error {
    if err := Convert_v1_Handler_To_core_Handler(&in.Handler, &out.Handler, s); err != nil {
    if err := Convert_v1_ProbeHandler_To_core_ProbeHandler(&in.ProbeHandler, &out.ProbeHandler, s); err != nil {
        return err
    }
    out.InitialDelaySeconds = in.InitialDelaySeconds
@ -6447,7 +6526,7 @@ func Convert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.
}

func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error {
    if err := Convert_core_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil {
    if err := Convert_core_ProbeHandler_To_v1_ProbeHandler(&in.ProbeHandler, &out.ProbeHandler, s); err != nil {
        return err
    }
    out.InitialDelaySeconds = in.InitialDelaySeconds
@ -6464,6 +6543,32 @@ func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.
    return autoConvert_core_Probe_To_v1_Probe(in, out, s)
}

func autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in *v1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error {
    out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec))
    out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
    out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
    out.GRPC = (*core.GRPCAction)(unsafe.Pointer(in.GRPC))
    return nil
}

// Convert_v1_ProbeHandler_To_core_ProbeHandler is an autogenerated conversion function.
func Convert_v1_ProbeHandler_To_core_ProbeHandler(in *v1.ProbeHandler, out *core.ProbeHandler, s conversion.Scope) error {
    return autoConvert_v1_ProbeHandler_To_core_ProbeHandler(in, out, s)
}

func autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *v1.ProbeHandler, s conversion.Scope) error {
    out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec))
    out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet))
    out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket))
    out.GRPC = (*v1.GRPCAction)(unsafe.Pointer(in.GRPC))
    return nil
}

// Convert_core_ProbeHandler_To_v1_ProbeHandler is an autogenerated conversion function.
func Convert_core_ProbeHandler_To_v1_ProbeHandler(in *core.ProbeHandler, out *v1.ProbeHandler, s conversion.Scope) error {
    return autoConvert_core_ProbeHandler_To_v1_ProbeHandler(in, out, s)
}

func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error {
    if in.Sources != nil {
        in, out := &in.Sources, &out.Sources
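In v1.23 the embedded Handler field on Probe became ProbeHandler, with GRPC added as a fourth action, and the generated conversions above simply copy each action pointer across API groups. A minimal sketch of what a caller sees, written as if inside this conversion package; passing a nil conversion.Scope is an assumption that holds only because the generated body never consults it:

    // The probe's actions now live on the embedded ProbeHandler.
    in := v1.Probe{
        ProbeHandler: v1.ProbeHandler{
            GRPC: &v1.GRPCAction{Port: 9090},
        },
        InitialDelaySeconds: 5,
    }
    var out core.Probe
    if err := Convert_v1_Probe_To_core_Probe(&in, &out, nil); err != nil {
        // handle the conversion error
    }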
272
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
@ -154,6 +155,7 @@ func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) {
    SetDefaults_ResourceList(&in.Spec.Resources.Limits)
    SetDefaults_ResourceList(&in.Spec.Resources.Requests)
    SetDefaults_ResourceList(&in.Status.Capacity)
    SetDefaults_ResourceList(&in.Status.AllocatedResources)
}

func SetObjectDefaults_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList) {
@ -252,20 +254,38 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
    SetDefaults_ResourceList(&a.Resources.Requests)
    if a.LivenessProbe != nil {
        SetDefaults_Probe(a.LivenessProbe)
        if a.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
        if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.ReadinessProbe != nil {
        SetDefaults_Probe(a.ReadinessProbe)
        if a.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
        if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.StartupProbe != nil {
        SetDefaults_Probe(a.StartupProbe)
        if a.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.Handler.HTTPGet)
        if a.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.StartupProbe.ProbeHandler.GRPC != nil {
            if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.Lifecycle != nil {
@ -302,20 +322,38 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
    SetDefaults_ResourceList(&a.Resources.Requests)
    if a.LivenessProbe != nil {
        SetDefaults_Probe(a.LivenessProbe)
        if a.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
        if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.ReadinessProbe != nil {
        SetDefaults_Probe(a.ReadinessProbe)
        if a.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
        if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.StartupProbe != nil {
        SetDefaults_Probe(a.StartupProbe)
        if a.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.Handler.HTTPGet)
        if a.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.StartupProbe.ProbeHandler.GRPC != nil {
            if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.Lifecycle != nil {
@ -352,20 +390,38 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
    SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
    if a.EphemeralContainerCommon.LivenessProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
        if a.EphemeralContainerCommon.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.ReadinessProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
        if a.EphemeralContainerCommon.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.StartupProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
        if a.EphemeralContainerCommon.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.Lifecycle != nil {
@ -472,20 +528,38 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
    SetDefaults_ResourceList(&a.Resources.Requests)
    if a.LivenessProbe != nil {
        SetDefaults_Probe(a.LivenessProbe)
        if a.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
        if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.ReadinessProbe != nil {
        SetDefaults_Probe(a.ReadinessProbe)
        if a.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
        if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.StartupProbe != nil {
        SetDefaults_Probe(a.StartupProbe)
        if a.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.Handler.HTTPGet)
        if a.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.StartupProbe.ProbeHandler.GRPC != nil {
            if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.Lifecycle != nil {
@ -522,20 +596,38 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
    SetDefaults_ResourceList(&a.Resources.Requests)
    if a.LivenessProbe != nil {
        SetDefaults_Probe(a.LivenessProbe)
        if a.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
        if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.ReadinessProbe != nil {
        SetDefaults_Probe(a.ReadinessProbe)
        if a.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
        if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.StartupProbe != nil {
        SetDefaults_Probe(a.StartupProbe)
        if a.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.Handler.HTTPGet)
        if a.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.StartupProbe.ProbeHandler.GRPC != nil {
            if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.Lifecycle != nil {
@ -572,20 +664,38 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
    SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
    if a.EphemeralContainerCommon.LivenessProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
        if a.EphemeralContainerCommon.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.ReadinessProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
        if a.EphemeralContainerCommon.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.StartupProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
        if a.EphemeralContainerCommon.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.Lifecycle != nil {
@ -694,20 +804,38 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
    SetDefaults_ResourceList(&a.Resources.Requests)
    if a.LivenessProbe != nil {
        SetDefaults_Probe(a.LivenessProbe)
        if a.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
        if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.ReadinessProbe != nil {
        SetDefaults_Probe(a.ReadinessProbe)
        if a.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
        if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.StartupProbe != nil {
        SetDefaults_Probe(a.StartupProbe)
        if a.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.Handler.HTTPGet)
        if a.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.StartupProbe.ProbeHandler.GRPC != nil {
            if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.Lifecycle != nil {
@ -744,20 +872,38 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
    SetDefaults_ResourceList(&a.Resources.Requests)
    if a.LivenessProbe != nil {
        SetDefaults_Probe(a.LivenessProbe)
        if a.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet)
        if a.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.ReadinessProbe != nil {
        SetDefaults_Probe(a.ReadinessProbe)
        if a.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet)
        if a.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.StartupProbe != nil {
        SetDefaults_Probe(a.StartupProbe)
        if a.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.Handler.HTTPGet)
        if a.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.StartupProbe.ProbeHandler.GRPC != nil {
            if a.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.Lifecycle != nil {
@ -794,20 +940,38 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
    SetDefaults_ResourceList(&a.EphemeralContainerCommon.Resources.Requests)
    if a.EphemeralContainerCommon.LivenessProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.LivenessProbe)
        if a.EphemeralContainerCommon.LivenessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.LivenessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.ReadinessProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.ReadinessProbe)
        if a.EphemeralContainerCommon.ReadinessProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.ReadinessProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.StartupProbe != nil {
        SetDefaults_Probe(a.EphemeralContainerCommon.StartupProbe)
        if a.EphemeralContainerCommon.StartupProbe.Handler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.Handler.HTTPGet)
        if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet != nil {
            SetDefaults_HTTPGetAction(a.EphemeralContainerCommon.StartupProbe.ProbeHandler.HTTPGet)
        }
        if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC != nil {
            if a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service == nil {
                var ptrVar1 string = ""
                a.EphemeralContainerCommon.StartupProbe.ProbeHandler.GRPC.Service = &ptrVar1
            }
        }
    }
    if a.EphemeralContainerCommon.Lifecycle != nil {
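Every probe hunk above applies the same rule: a gRPC probe with no Service gets a pointer to the empty string, which the gRPC health-checking protocol treats as a query for the server's overall health. The pattern, reduced to one hypothetical probe:

    probe := &v1.Probe{ProbeHandler: v1.ProbeHandler{GRPC: &v1.GRPCAction{Port: 9090}}}
    if probe.ProbeHandler.GRPC != nil && probe.ProbeHandler.GRPC.Service == nil {
        var ptrVar1 string = ""
        probe.ProbeHandler.GRPC.Service = &ptrVar1 // "set to default", distinct from unset
    }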
446
vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
generated
vendored
@ -45,6 +45,7 @@ import (
    schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
    apiservice "k8s.io/kubernetes/pkg/api/service"
    "k8s.io/kubernetes/pkg/apis/core"
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/core/helper"
    podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
    corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
@ -78,9 +79,16 @@ var allowedEphemeralContainerFields = map[string]bool{
    "Command": true,
    "Args": true,
    "WorkingDir": true,
    "Ports": false,
    "EnvFrom": true,
    "Env": true,
    "Resources": false,
    "VolumeMounts": true,
    "VolumeDevices": true,
    "LivenessProbe": false,
    "ReadinessProbe": false,
    "StartupProbe": false,
    "Lifecycle": false,
    "TerminationMessagePath": true,
    "TerminationMessagePolicy": true,
    "ImagePullPolicy": true,
@ -90,6 +98,12 @@ var allowedEphemeralContainerFields = map[string]bool{
    "TTY": true,
}

// validOS stores the set of valid OSes within pod spec.
// The valid values currently are linux, windows.
// In future, they can be expanded to values from
// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
var validOS = sets.NewString(string(core.Linux), string(core.Windows))

// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue
func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList {
    allErrs := field.ErrorList{}
@ -415,9 +429,12 @@ func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool {
    return false
}

func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) {
// isMatched checks whether the volume with the given name is used by a
// container and if so, if it involves a PVC.
func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (isMatched bool, isPVC bool) {
    if source, ok := volumes[name]; ok {
        if source.PersistentVolumeClaim != nil {
        if source.PersistentVolumeClaim != nil ||
            source.Ephemeral != nil {
            return true, true
        }
        return true, false
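The rewritten isMatchedDevice now reports a PVC-backed volume for generic ephemeral inline volumes as well as explicit claim references, which is what lets block-mode volumeDevices use them. A sketch under that assumption, with hypothetical volume names:

    vols := map[string]core.VolumeSource{
        "data":    {PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "data-pvc"}},
        "scratch": {Ephemeral: &core.EphemeralVolumeSource{ /* volumeClaimTemplate elided */ }},
    }
    matched, isPVC := isMatchedDevice("scratch", vols)
    // matched == true, isPVC == true: both kinds now pass block-mode validation.
    _, _ = matched, isPVC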
@ -2003,20 +2020,26 @@ func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) f
    return allErrs
}

// PersistentVolumeClaimSpecValidationOptions contains the different settings for PersistentVolumeClaim validation
type PersistentVolumeClaimSpecValidationOptions struct {
    // Allow spec to contain the "ReadWriteOncePod" access mode
    AllowReadWriteOncePod bool
    // Allow pvc expansion after PVC is created and bound to a PV
    EnableExpansion bool
    // Allow users to recover from previously failing expansion operation
    EnableRecoverFromExpansionFailure bool
}

func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions {
    opts := PersistentVolumeClaimSpecValidationOptions{
        AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
        AllowReadWriteOncePod:             utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
        EnableExpansion:                   utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes),
        EnableRecoverFromExpansionFailure: utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure),
    }
    if oldPvc == nil {
        // If there's no old PVC, use the options based solely on feature enablement
        return opts
    }

    if helper.ContainsAccessMode(oldPvc.Spec.AccessModes, core.ReadWriteOncePod) {
        // If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
        opts.AllowReadWriteOncePod = true
@ -2156,7 +2179,7 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl
        allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
    }

    if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
    if opts.EnableExpansion {
        // let's make sure storage values are the same.
        if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil {
            newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] // +k8s:verify-mutation:reason=clone
@ -2164,13 +2187,23 @@

        oldSize := oldPvc.Spec.Resources.Requests["storage"]
        newSize := newPvc.Spec.Resources.Requests["storage"]
        statusSize := oldPvc.Status.Capacity["storage"]

        if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
            specDiff := diff.ObjectDiff(newPvcClone.Spec, oldPvcClone.Spec)
            allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), fmt.Sprintf("spec is immutable after creation except resources.requests for bound claims\n%v", specDiff)))
        }
        if newSize.Cmp(oldSize) < 0 {
            allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than previous value"))
            if !opts.EnableRecoverFromExpansionFailure {
                allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than previous value"))
            } else {
                // This validation permits reducing pvc requested size up to capacity recorded in pvc.status
                // so that users can recover from volume expansion failure, but Kubernetes does not actually
                // support volume shrinking
                if newSize.Cmp(statusSize) <= 0 {
                    allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field can not be less than status.capacity"))
                }
            }
        }

    } else {
@ -2203,8 +2236,15 @@ func validateStorageClassUpgrade(oldAnnotations, newAnnotations map[string]strin
    (!newAnnotationExist || newScInAnnotation == oldSc) /* condition 4 */
}
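The recovery branch above lets a request shrink again only while it stays above the capacity recorded in status, since Kubernetes cannot actually shrink volumes. A sketch with hypothetical quantities (resource is assumed to be k8s.io/apimachinery/pkg/api/resource):

    oldSize := resource.MustParse("100Gi")   // previous spec.resources.requests["storage"]
    newSize := resource.MustParse("20Gi")    // proposed lower request during recovery
    statusSize := resource.MustParse("10Gi") // what the volume actually provides
    if newSize.Cmp(oldSize) < 0 && newSize.Cmp(statusSize) > 0 {
        // accepted when EnableRecoverFromExpansionFailure is true
    }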
var resizeStatusSet = sets.NewString(string(core.PersistentVolumeClaimNoExpansionInProgress),
    string(core.PersistentVolumeClaimControllerExpansionInProgress),
    string(core.PersistentVolumeClaimControllerExpansionFailed),
    string(core.PersistentVolumeClaimNodeExpansionPending),
    string(core.PersistentVolumeClaimNodeExpansionInProgress),
    string(core.PersistentVolumeClaimNodeExpansionFailed))

// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, validationOpts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
    allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
    if len(newPvc.ResourceVersion) == 0 {
        allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
@ -2212,10 +2252,32 @@
    if len(newPvc.Spec.AccessModes) == 0 {
        allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), ""))
    }

    capPath := field.NewPath("status", "capacity")
    for r, qty := range newPvc.Status.Capacity {
        allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
    }
    if validationOpts.EnableRecoverFromExpansionFailure {
        resizeStatusPath := field.NewPath("status", "resizeStatus")
        if newPvc.Status.ResizeStatus != nil {
            resizeStatus := *newPvc.Status.ResizeStatus
            if !resizeStatusSet.Has(string(resizeStatus)) {
                allErrs = append(allErrs, field.NotSupported(resizeStatusPath, resizeStatus, resizeStatusSet.List()))
            }
        }
        allocPath := field.NewPath("status", "allocatedResources")
        for r, qty := range newPvc.Status.AllocatedResources {
            if r != core.ResourceStorage {
                allErrs = append(allErrs, field.NotSupported(allocPath, r, []string{string(core.ResourceStorage)}))
                continue
            }
            if errs := validateBasicResource(qty, allocPath.Key(string(r))); len(errs) > 0 {
                allErrs = append(allErrs, errs...)
            } else {
                allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), qty, allocPath.Key(string(r)))...)
            }
        }
    }
    return allErrs
}
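For the new status checks above, a hedged sketch of a conforming update on a hypothetical claim pvc: resizeStatus must be one of the enumerated phases and allocatedResources may only carry "storage".

    rs := core.PersistentVolumeClaimNodeExpansionPending
    pvc.Status.ResizeStatus = &rs
    pvc.Status.AllocatedResources = core.ResourceList{
        core.ResourceStorage: resource.MustParse("100Gi"),
    }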
@ -2285,8 +2347,6 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString(
    "spec.serviceAccountName",
    "status.hostIP",
    "status.podIP",
    // status.podIPs is populated even if IPv6DualStack feature gate
    // is not enabled. This will work for single stack and dual stack.
    "status.podIPs")

var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage")
@ -2610,9 +2670,9 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str
    if devicename.Has(devName) {
        allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique"))
    }
    // Must be PersistentVolumeClaim volume source
    // Must be based on PersistentVolumeClaim (PVC reference or generic ephemeral inline volume)
    if didMatch && !isPVC {
        allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode"))
        allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim or Ephemeral for block mode"))
    }
    if !didMatch {
        allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName))
@ -2648,7 +2708,7 @@ func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
    if probe == nil {
        return allErrs
    }
    allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...)
    allErrs = append(allErrs, validateHandler(handlerFromProbe(&probe.ProbeHandler), fldPath)...)

    allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...)
    allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...)
@ -2661,6 +2721,30 @@
    return allErrs
}

type commonHandler struct {
    Exec *core.ExecAction
    HTTPGet *core.HTTPGetAction
    TCPSocket *core.TCPSocketAction
    GRPC *core.GRPCAction
}

func handlerFromProbe(ph *core.ProbeHandler) commonHandler {
    return commonHandler{
        Exec: ph.Exec,
        HTTPGet: ph.HTTPGet,
        TCPSocket: ph.TCPSocket,
        GRPC: ph.GRPC,
    }
}

func handlerFromLifecycle(lh *core.LifecycleHandler) commonHandler {
    return commonHandler{
        Exec: lh.Exec,
        HTTPGet: lh.HTTPGet,
        TCPSocket: lh.TCPSocket,
    }
}
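Since the shared core.Handler type is gone, the commonHandler adapter above funnels both ProbeHandler and LifecycleHandler values through a single validator; only probes carry the GRPC action. A minimal usage sketch:

    probe := core.Probe{ProbeHandler: core.ProbeHandler{GRPC: &core.GRPCAction{Port: 9090}}}
    errs := validateHandler(handlerFromProbe(&probe.ProbeHandler), field.NewPath("livenessProbe"))
    // Lifecycle handlers go through the same funnel, minus GRPC:
    hook := core.LifecycleHandler{Exec: &core.ExecAction{Command: []string{"sh", "-c", "true"}}}
    errs = append(errs, validateHandler(handlerFromLifecycle(&hook), field.NewPath("postStart"))...)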

func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if config == nil {
@ -2766,8 +2850,10 @@ func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.E
func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList {
    return ValidatePortNumOrName(tcp.Port, fldPath.Child("port"))
}

func validateHandler(handler *core.Handler, fldPath *field.Path) field.ErrorList {
func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList {
    return ValidatePortNumOrName(intstr.FromInt(int(grpc.Port)), fldPath.Child("port"))
}
func validateHandler(handler commonHandler, fldPath *field.Path) field.ErrorList {
    numHandlers := 0
    allErrors := field.ErrorList{}
    if handler.Exec != nil {
@ -2794,6 +2880,14 @@
            allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...)
        }
    }
    if handler.GRPC != nil {
        if numHandlers > 0 {
            allErrors = append(allErrors, field.Forbidden(fldPath.Child("grpc"), "may not specify more than 1 handler type"))
        } else {
            numHandlers++
            allErrors = append(allErrors, validateGRPCAction(handler.GRPC, fldPath.Child("grpc"))...)
        }
    }
    if numHandlers == 0 {
        allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type"))
    }
@ -2803,10 +2897,10 @@
func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if lifecycle.PostStart != nil {
        allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...)
        allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), fldPath.Child("postStart"))...)
    }
    if lifecycle.PreStop != nil {
        allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...)
        allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PreStop), fldPath.Child("preStop"))...)
    }
    return allErrs
}
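The new GRPC branch keeps the one-action-per-handler invariant. A sketch of the failure mode it guards against:

    h := commonHandler{
        Exec: &core.ExecAction{Command: []string{"cat", "/tmp/healthy"}},
        GRPC: &core.GRPCAction{Port: 9090},
    }
    errs := validateHandler(h, field.NewPath("livenessProbe"))
    // errs[0]: livenessProbe.grpc: Forbidden: may not specify more than 1 handler type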
@ -2874,6 +2968,18 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
    // Lifecycle, probes, resources and ports should be disallowed. This is implemented as a list
    // of allowed fields so that new fields will be given consideration prior to inclusion in Ephemeral Containers.
    allErrs = append(allErrs, validateFieldAllowList(ec.EphemeralContainerCommon, allowedEphemeralContainerFields, "cannot be set for an Ephemeral Container", idxPath)...)

    // VolumeMount subpaths have the potential to leak resources since they're implemented with bind mounts
    // that aren't cleaned up until the pod exits. Since they also imply that the container is being used
    // as part of the workload, they're disallowed entirely.
    for i, vm := range ec.VolumeMounts {
        if vm.SubPath != "" {
            allErrs = append(allErrs, field.Forbidden(idxPath.Child("volumeMounts").Index(i).Child("subPath"), "cannot be set for an Ephemeral Container"))
        }
        if vm.SubPathExpr != "" {
            allErrs = append(allErrs, field.Forbidden(idxPath.Child("volumeMounts").Index(i).Child("subPathExpr"), "cannot be set for an Ephemeral Container"))
        }
    }
}

return allErrs
@ -2902,7 +3008,7 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er
    return allErrs
}

func validateInitContainers(containers, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func validateInitContainers(containers []core.Container, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
    var allErrs field.ErrorList
    if len(containers) > 0 {
        allErrs = append(allErrs, validateContainers(containers, true, deviceVolumes, fldPath, opts)...)
@ -3112,7 +3218,7 @@ func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolic
        allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers)))
    }
    for i, ns := range dnsConfig.Nameservers {
        if ip := net.ParseIP(ns); ip == nil {
        if ip := netutils.ParseIPSloppy(ns); ip == nil {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address"))
        }
    }
@ -3246,7 +3352,7 @@ func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldToleratio
func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    for _, hostAlias := range hostAliases {
        if ip := net.ParseIP(hostAlias.IP); ip == nil {
        if ip := netutils.ParseIPSloppy(hostAlias.IP); ip == nil {
            allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address"))
        }
        for _, hostname := range hostAlias.Hostnames {
@ -3326,6 +3432,10 @@ type PodValidationOptions struct {
    AllowWindowsHostProcessField bool
    // Allow more DNSSearchPaths and longer DNSSearchListChars
    AllowExpandedDNSConfig bool
    // Allow OSField to be set in the pod spec
    AllowOSField bool
    // Allow sysctl name to contain a slash
    AllowSysctlRegexContainSlash bool
}

// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
@ -3422,7 +3532,7 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
    allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
    allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
    allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
    allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
    allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...)
    allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
    allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...)
    allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
@ -3482,6 +3592,115 @@
        allErrs = append(allErrs, validateOverhead(spec.Overhead, fldPath.Child("overhead"), opts)...)
    }

    if spec.OS != nil {
        osErrs := validateOS(spec, fldPath.Child("os"), opts)
        switch {
        case len(osErrs) > 0:
            allErrs = append(allErrs, osErrs...)
        case spec.OS.Name == core.Linux:
            allErrs = append(allErrs, validateLinux(spec, fldPath)...)
        case spec.OS.Name == core.Windows:
            allErrs = append(allErrs, validateWindows(spec, fldPath)...)
        }
    }
    return allErrs
}
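The switch above dispatches OS-specific validation once the alpha os field is set (gated by AllowOSField, which upstream wires to the IdentifyPodOS feature gate). A sketch of the windows path with a hypothetical uid pointer; pod-level runAsUser is one of the Linux-only fields the new validator rejects:

    uid := int64(1000)
    spec := core.PodSpec{
        OS:              &core.PodOS{Name: core.Windows},
        SecurityContext: &core.PodSecurityContext{RunAsUser: &uid},
    }
    errs := validateWindows(&spec, field.NewPath("spec"))
    // errs: spec.securityContext.runAsUser: Forbidden: cannot be set for a windows pod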

func validateLinux(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    securityContext := spec.SecurityContext
    if securityContext != nil && securityContext.WindowsOptions != nil {
        allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("windowsOptions"), "windows options cannot be set for a linux pod"))
    }
    podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool {
        sc := c.SecurityContext
        if sc != nil && sc.WindowsOptions != nil {
            fldPath := cFldPath.Child("securityContext")
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("windowsOptions"), "windows options cannot be set for a linux pod"))
        }
        return true
    })
    return allErrs
}

func validateWindows(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    securityContext := spec.SecurityContext
    // validate Pod SecurityContext
    if securityContext != nil {
        if securityContext.SELinuxOptions != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("seLinuxOptions"), "cannot be set for a windows pod"))
        }
        if securityContext.HostPID {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPID"), "cannot be set for a windows pod"))
        }
        if securityContext.HostIPC {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostIPC"), "cannot be set for a windows pod"))
        }
        if securityContext.SeccompProfile != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("seccompProfile"), "cannot be set for a windows pod"))
        }
        if securityContext.FSGroup != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("fsGroup"), "cannot be set for a windows pod"))
        }
        if securityContext.FSGroupChangePolicy != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("fsGroupChangePolicy"), "cannot be set for a windows pod"))
        }
        if len(securityContext.Sysctls) > 0 {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("sysctls"), "cannot be set for a windows pod"))
        }
        if securityContext.ShareProcessNamespace != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("shareProcessNamespace"), "cannot be set for a windows pod"))
        }
        if securityContext.RunAsUser != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("runAsUser"), "cannot be set for a windows pod"))
        }
        if securityContext.RunAsGroup != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("runAsGroup"), "cannot be set for a windows pod"))
        }
        if securityContext.SupplementalGroups != nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("supplementalGroups"), "cannot be set for a windows pod"))
        }
    }
    podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool {
        // validate container security context
        sc := c.SecurityContext
        // OS based podSecurityContext validation
        // There is some naming overlap between Windows and Linux Security Contexts but all the Windows Specific options
        // are set via securityContext.WindowsOptions which we validate below
        // TODO: Think if we need to relax this restriction or some of the restrictions
        if sc != nil {
            fldPath := cFldPath.Child("securityContext")
            if sc.SELinuxOptions != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("seLinuxOptions"), "cannot be set for a windows pod"))
            }
            if sc.SeccompProfile != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("seccompProfile"), "cannot be set for a windows pod"))
            }
            if sc.Capabilities != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("capabilities"), "cannot be set for a windows pod"))
            }
            if sc.ReadOnlyRootFilesystem != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("readOnlyRootFilesystem"), "cannot be set for a windows pod"))
            }
            if sc.Privileged != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "cannot be set for a windows pod"))
            }
            if sc.AllowPrivilegeEscalation != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("allowPrivilegeEscalation"), "cannot be set for a windows pod"))
            }
            if sc.ProcMount != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("procMount"), "cannot be set for a windows pod"))
            }
            if sc.RunAsUser != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("runAsUser"), "cannot be set for a windows pod"))
            }
            if sc.RunAsGroup != nil {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("runAsGroup"), "cannot be set for a windows pod"))
            }
        }
        return true
    })
    return allErrs
}
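validateWindows applies the same forbidden-field sweep per container. A sketch of a container that would trip it, with a hypothetical image name:

    c := core.Container{
        Name:  "app",
        Image: "mcr.microsoft.com/windows/nanoserver:ltsc2022",
        SecurityContext: &core.SecurityContext{
            Capabilities: &core.Capabilities{Add: []core.Capability{"NET_ADMIN"}},
        },
    }
    // Placed in a pod with OS.Name == core.Windows, validation appends:
    // spec.containers[0].securityContext.capabilities: Forbidden: cannot be set for a windows pod
    _ = c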
@ -3868,29 +4087,48 @@ const (
    // a sysctl name regex
    SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt

    // a sysctl name regex with slash allowed
    SysctlContainSlashFmt string = "(" + SysctlSegmentFmt + "[\\./])*" + SysctlSegmentFmt

    // the maximal length of a sysctl name
    SysctlMaxLength int = 253
)

var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$")

var sysctlContainSlashRegexp = regexp.MustCompile("^" + SysctlContainSlashFmt + "$")

// IsValidSysctlName checks that the given string is a valid sysctl name,
// i.e. matches SysctlFmt.
func IsValidSysctlName(name string) bool {
// i.e. matches SysctlFmt (or SysctlContainSlashFmt if canContainSlash is true).
// More info:
// https://man7.org/linux/man-pages/man8/sysctl.8.html
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html
func IsValidSysctlName(name string, canContainSlash bool) bool {
    if len(name) > SysctlMaxLength {
        return false
    }
    if canContainSlash {
        return sysctlContainSlashRegexp.MatchString(name)
    }
    return sysctlRegexp.MatchString(name)
}

func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList {
func getSysctlFmt(canContainSlash bool) string {
    if canContainSlash {
        // use relaxed validation everywhere in 1.24
        return SysctlContainSlashFmt
    }
    // Will be removed in 1.24
    return SysctlFmt
}
func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path, allowSysctlRegexContainSlash bool) field.ErrorList {
    allErrs := field.ErrorList{}
    names := make(map[string]struct{})
    for i, s := range sysctls {
        if len(s.Name) == 0 {
            allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), ""))
        } else if !IsValidSysctlName(s.Name) {
            allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt)))
        } else if !IsValidSysctlName(s.Name, allowSysctlRegexContainSlash) {
            allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, getSysctlFmt(allowSysctlRegexContainSlash))))
        } else if _, ok := names[s.Name]; ok {
            allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("name"), s.Name))
        }
@ -3900,7 +4138,7 @@ func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList
}
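With the relaxed format, sysctl names may also use the slash-separated spelling that sysctl(8) accepts; the dotted form stays valid either way. A quick sketch of the new signature:

    IsValidSysctlName("net.ipv4.ip_forward", false) // true
    IsValidSysctlName("net/ipv4/ip_forward", false) // false: dots only
    IsValidSysctlName("net/ipv4/ip_forward", true)  // true: relaxed form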

// ValidatePodSecurityContext tests that the specified PodSecurityContext has valid data.
func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
    allErrs := field.ErrorList{}

    if securityContext != nil {
@ -3930,7 +4168,7 @@
    }

    if len(securityContext.Sysctls) != 0 {
        allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"))...)
        allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"), opts.AllowSysctlRegexContainSlash)...)
    }

    if securityContext.FSGroupChangePolicy != nil {
@ -4166,7 +4404,7 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS
    return allErrs
}

// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make.
// ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation.
func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
    fldPath := field.NewPath("metadata")
    allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
@ -4188,6 +4426,8 @@
    // any terminated containers to a non-terminated state.
    allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...)
    allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
    // The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever.
    allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...)

    if newIPErrs := validatePodIPs(newPod); len(newIPErrs) > 0 {
        allErrs = append(allErrs, newIPErrs...)
@ -4214,17 +4454,18 @@ func validatePodConditions(conditions []core.PodCondition, fldPath *field.Path)
// ValidatePodEphemeralContainersUpdate tests that a user update to EphemeralContainers is valid.
// newPod and oldPod must only differ in their EphemeralContainers.
func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
    spec := newPod.Spec
    specPath := field.NewPath("spec").Child("ephemeralContainers")

    vols := make(map[string]core.VolumeSource)
    for _, vol := range spec.Volumes {
        vols[vol.Name] = vol.VolumeSource
    }
    allErrs := validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, specPath, opts)
    // Part 1: Validate newPod's spec and updates to metadata
    fldPath := field.NewPath("metadata")
    allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
    allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
    allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)

    // Part 2: Validate that the changes between oldPod.Spec.EphemeralContainers and
    // newPod.Spec.EphemeralContainers are allowed.
    //
    // Existing EphemeralContainers may not be changed. Order isn't preserved by patch, so check each individually.
    newContainerIndex := make(map[string]*core.EphemeralContainer)
    specPath := field.NewPath("spec").Child("ephemeralContainers")
    for i := range newPod.Spec.EphemeralContainers {
        newContainerIndex[newPod.Spec.EphemeralContainers[i].Name] = &newPod.Spec.EphemeralContainers[i]
    }
@ -4357,7 +4598,7 @@ func ValidateService(service *core.Service) field.ErrorList {
    }

    // dualstack <-> ClusterIPs <-> ipfamilies
    allErrs = append(allErrs, validateServiceClusterIPsRelatedFields(service)...)
    allErrs = append(allErrs, ValidateServiceClusterIPsRelatedFields(service)...)

    ipPath := specPath.Child("externalIPs")
    for i, ip := range service.Spec.ExternalIPs {
@ -4453,8 +4694,8 @@ func ValidateService(service *core.Service) field.ErrorList {
    // validate LoadBalancerClass field
    allErrs = append(allErrs, validateLoadBalancerClassField(nil, service)...)

    // external traffic fields
    allErrs = append(allErrs, validateServiceExternalTrafficFieldsValue(service)...)
    // external traffic policy fields
    allErrs = append(allErrs, validateServiceExternalTrafficPolicy(service)...)

    // internal traffic policy field
    allErrs = append(allErrs, validateServiceInternalTrafficFieldsValue(service)...)
@ -4506,22 +4747,58 @@ func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bo
    return allErrs
}
|
||||
|
||||
// validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations
|
||||
// have legal value.
|
||||
func validateServiceExternalTrafficFieldsValue(service *core.Service) field.ErrorList {
|
||||
func needsExternalTrafficPolicy(svc *api.Service) bool {
|
||||
return svc.Spec.Type == core.ServiceTypeLoadBalancer || svc.Spec.Type == core.ServiceTypeNodePort
|
||||
}
|
||||
|
||||
var validExternalTrafficPolicies = sets.NewString(
|
||||
string(core.ServiceExternalTrafficPolicyTypeCluster),
|
||||
string(core.ServiceExternalTrafficPolicyTypeLocal))
|
||||
|
||||
func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// Check first class fields.
|
||||
if service.Spec.ExternalTrafficPolicy != "" &&
|
||||
service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeCluster &&
|
||||
service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeLocal {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
|
||||
fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal)))
|
||||
fldPath := field.NewPath("spec")
|
||||
|
||||
if !needsExternalTrafficPolicy(service) {
|
||||
if service.Spec.ExternalTrafficPolicy != "" {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
|
||||
"may only be set when `type` is 'NodePort' or 'LoadBalancer'"))
|
||||
}
|
||||
} else {
|
||||
if service.Spec.ExternalTrafficPolicy == "" {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("externalTrafficPolicy"), ""))
|
||||
} else if !validExternalTrafficPolicies.Has(string(service.Spec.ExternalTrafficPolicy)) {
|
||||
allErrs = append(allErrs, field.NotSupported(fldPath.Child("externalTrafficPolicy"),
|
||||
service.Spec.ExternalTrafficPolicy, validExternalTrafficPolicies.List()))
|
||||
}
|
||||
}
|
||||
|
||||
if service.Spec.HealthCheckNodePort < 0 {
|
||||
allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort,
|
||||
"HealthCheckNodePort must be not less than 0"))
|
||||
if !apiservice.NeedsHealthCheck(service) {
|
||||
if service.Spec.HealthCheckNodePort != 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort,
|
||||
"may only be set when `type` is 'LoadBalancer' and `externalTrafficPolicy` is 'Local'"))
|
||||
}
|
||||
} else {
|
||||
if service.Spec.HealthCheckNodePort == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("healthCheckNodePort"), ""))
|
||||
} else {
|
||||
for _, msg := range validation.IsValidPortNum(int(service.Spec.HealthCheckNodePort)) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
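
The rewrite above changes the validation contract: instead of merely checking the value, externalTrafficPolicy is now rejected outright on Service types that do not use it and required on those that do. A minimal sketch of the behaviour, assuming the surrounding package's types (the Service literal is illustrative, not from this diff):

// Hypothetical caller of the new validator.
svc := &core.Service{}
svc.Spec.Type = core.ServiceTypeClusterIP
svc.Spec.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyTypeLocal
errs := validateServiceExternalTrafficPolicy(svc)
// errs now holds one field.Invalid for spec.externalTrafficPolicy, because
// the field may only be set when `type` is 'NodePort' or 'LoadBalancer'.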

func validateServiceExternalTrafficFieldsUpdate(before, after *api.Service) field.ErrorList {
allErrs := field.ErrorList{}

if apiservice.NeedsHealthCheck(before) && apiservice.NeedsHealthCheck(after) {
if after.Spec.HealthCheckNodePort != before.Spec.HealthCheckNodePort {
allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "healthCheckNodePort"), "field is immutable"))
}
}

return allErrs
@ -4545,29 +4822,6 @@ func validateServiceInternalTrafficFieldsValue(service *core.Service) field.Erro
return allErrs
}

// ValidateServiceExternalTrafficFieldsCombination validates if ExternalTrafficPolicy,
// HealthCheckNodePort and Type combination are legal. For update, it should be called
// after clearing externalTraffic related fields for the ease of transitioning between
// different service types.
func ValidateServiceExternalTrafficFieldsCombination(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}

if service.Spec.Type != core.ServiceTypeLoadBalancer &&
service.Spec.Type != core.ServiceTypeNodePort &&
service.Spec.ExternalTrafficPolicy != "" {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy,
"ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service"))
}

if !apiservice.NeedsHealthCheck(service) &&
service.Spec.HealthCheckNodePort != 0 {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "healthCheckNodePort"), service.Spec.HealthCheckNodePort,
"HealthCheckNodePort can only be set on LoadBalancer service with ExternalTrafficPolicy=Local"))
}

return allErrs
}

// ValidateServiceCreate validates Services as they are created.
func ValidateServiceCreate(service *core.Service) field.ErrorList {
return ValidateService(service)
@ -4591,6 +4845,8 @@ func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList {
upgradeDowngradeLoadBalancerClassErrs := validateLoadBalancerClassField(oldService, service)
allErrs = append(allErrs, upgradeDowngradeLoadBalancerClassErrs...)

allErrs = append(allErrs, validateServiceExternalTrafficFieldsUpdate(oldService, service)...)

return append(allErrs, ValidateService(service)...)
}

@ -5840,7 +6096,7 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path)
// - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml
func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
ip := net.ParseIP(ipAddress)
ip := netutils.ParseIPSloppy(ipAddress)
if ip == nil {
allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address"))
return allErrs
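
The switch from net.ParseIP to netutils.ParseIPSloppy here (and in the hunks below) tracks Go 1.17, whose net.ParseIP started rejecting IPv4 addresses with leading zeros; the k8s.io/utils/net helper keeps the older, lenient parsing so already-stored objects still validate. A small standalone sketch of the difference:

package main

import (
	"fmt"
	"net"

	netutils "k8s.io/utils/net"
)

func main() {
	// Go >= 1.17: net.ParseIP rejects leading zeros and returns nil.
	fmt.Println(net.ParseIP("010.001.002.003"))
	// ParseIPSloppy preserves the legacy behaviour and parses it as 10.1.2.3.
	fmt.Println(netutils.ParseIPSloppy("010.001.002.003"))
}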
@ -6135,6 +6391,26 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
return allErrs
}

// validateOS validates the OS field within pod spec
func validateOS(podSpec *core.PodSpec, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
os := podSpec.OS
if os == nil {
return allErrs
}
if !opts.AllowOSField {
return append(allErrs, field.Forbidden(fldPath, "cannot be set when IdentifyPodOS feature is not enabled"))
}
if len(os.Name) == 0 {
return append(allErrs, field.Required(fldPath.Child("name"), "cannot be empty"))
}
osName := string(os.Name)
if !validOS.Has(osName) {
allErrs = append(allErrs, field.NotSupported(fldPath, osName, validOS.List()))
}
return allErrs
}
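
validateOS gates the new pod-level OS field on the IdentifyPodOS feature via opts.AllowOSField and restricts names to the validOS set. A sketch under those assumptions (the PodSpec literal is illustrative):

podSpec := &core.PodSpec{OS: &core.PodOS{Name: core.Linux}}
// With the gate enabled the spec passes; with AllowOSField=false the same
// spec is rejected with field.Forbidden.
errs := validateOS(podSpec, field.NewPath("spec").Child("os"), PodValidationOptions{AllowOSField: true})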

func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList {
allErrs := field.ErrorList{}
if opts.TailLines != nil && *opts.TailLines < 0 {
@ -6160,7 +6436,7 @@ func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.
for i, ingress := range status.Ingress {
idxPath := fldPath.Child("ingress").Index(i)
if len(ingress.IP) > 0 {
if isIP := (net.ParseIP(ingress.IP) != nil); !isIP {
if isIP := (netutils.ParseIPSloppy(ingress.IP) != nil); !isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address"))
}
}
@ -6168,7 +6444,7 @@ func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.
for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
}
if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP {
if isIP := (netutils.ParseIPSloppy(ingress.Hostname) != nil); isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
}
}
@ -6198,7 +6474,7 @@ func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *

// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR
func ValidateCIDR(cidr string) (*net.IPNet, error) {
_, net, err := net.ParseCIDR(cidr)
_, net, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
return nil, err
}
@ -6289,8 +6565,10 @@ func ValidateSpreadConstraintNotRepeat(fldPath *field.Path, constraint core.Topo
return nil
}

// validateServiceClusterIPsRelatedFields validates .spec.ClusterIPs, .spec.IPFamilies, .spec.ipFamilyPolicy
func validateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorList {
// ValidateServiceClusterIPsRelatedFields validates .spec.ClusterIPs,
// .spec.IPFamilies, .spec.ipFamilyPolicy. This is exported because it is used
// during IP init and allocation.
func ValidateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorList {
// ClusterIP, ClusterIPs, IPFamilyPolicy and IPFamilies are validated prior (all must be unset) for ExternalName service
if service.Spec.Type == core.ServiceTypeExternalName {
return field.ErrorList{}
@ -6312,12 +6590,12 @@ func validateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorLi
if len(service.Spec.ClusterIPs) == 0 {
allErrs = append(allErrs, field.Required(clusterIPsField, ""))
} else if service.Spec.ClusterIPs[0] != service.Spec.ClusterIP {
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "element [0] must match clusterIP"))
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "first value must match `clusterIP`"))
}
} else { // ClusterIP == ""
// If ClusterIP is not set, ClusterIPs must also be unset.
if len(service.Spec.ClusterIPs) != 0 {
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "must be empty when clusterIP is empty"))
allErrs = append(allErrs, field.Invalid(clusterIPsField, service.Spec.ClusterIPs, "must be empty when `clusterIP` is not specified"))
}
}
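
The reworded messages above encode a coupling invariant: when `clusterIP` is set, `clusterIPs` must lead with the same value, and it must be empty when `clusterIP` is unset. A sketch of a dual-stack spec that satisfies the rule (addresses are illustrative):

svc.Spec.ClusterIP = "10.0.0.10"
svc.Spec.ClusterIPs = []string{"10.0.0.10", "fd00::10"} // first value matches `clusterIP`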

@ -6454,7 +6732,7 @@ func validateUpgradeDowngradeClusterIPs(oldService, service *core.Service) field
// user *must* set IPFamilyPolicy == SingleStack
if len(service.Spec.ClusterIPs) == 1 {
if service.Spec.IPFamilyPolicy == nil || *(service.Spec.IPFamilyPolicy) != core.IPFamilyPolicySingleStack {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "`ipFamilyPolicy` must be set to 'SingleStack' when releasing the secondary clusterIP"))
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "must be set to 'SingleStack' when releasing the secondary clusterIP"))
}
}
case len(oldService.Spec.ClusterIPs) < len(service.Spec.ClusterIPs):
@ -6518,7 +6796,7 @@ func validateUpgradeDowngradeIPFamilies(oldService, service *core.Service) field
// user *must* set IPFamilyPolicy == SingleStack
if len(service.Spec.IPFamilies) == 1 {
if service.Spec.IPFamilyPolicy == nil || *(service.Spec.IPFamilyPolicy) != core.IPFamilyPolicySingleStack {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "clusterIPs").Index(0), service.Spec.ClusterIPs, "`ipFamilyPolicy` must be set to 'SingleStack' when releasing the secondary ipFamily"))
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy, "must be set to 'SingleStack' when releasing the secondary ipFamily"))
}
}
case len(oldService.Spec.IPFamilies) < len(service.Spec.IPFamilies):

159
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
@ -1668,6 +1669,27 @@ func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSour
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GRPCAction) DeepCopyInto(out *GRPCAction) {
*out = *in
if in.Service != nil {
in, out := &in.Service, &out.Service
*out = new(string)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAction.
func (in *GRPCAction) DeepCopy() *GRPCAction {
if in == nil {
return nil
}
out := new(GRPCAction)
in.DeepCopyInto(out)
return out
}
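
GRPCAction is the new gRPC probe payload in this release; like every API type it gets generated DeepCopyInto/DeepCopy so callers can mutate copies of cache-shared objects safely. Illustrative use (the names and values are hypothetical):

svcName := "healthz"
orig := &core.GRPCAction{Port: 9090, Service: &svcName}
cp := orig.DeepCopy()
*cp.Service = "metrics" // orig.Service still points at "healthz"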

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) {
*out = *in
@ -1759,37 +1781,6 @@ func (in *HTTPHeader) DeepCopy() *HTTPHeader {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Handler) DeepCopyInto(out *Handler) {
*out = *in
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
*out = new(ExecAction)
(*in).DeepCopyInto(*out)
}
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
*out = new(HTTPGetAction)
(*in).DeepCopyInto(*out)
}
if in.TCPSocket != nil {
in, out := &in.TCPSocket, &out.TCPSocket
*out = new(TCPSocketAction)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler.
func (in *Handler) DeepCopy() *Handler {
if in == nil {
return nil
}
out := new(Handler)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostAlias) DeepCopyInto(out *HostAlias) {
*out = *in
@ -1920,12 +1911,12 @@ func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
*out = *in
if in.PostStart != nil {
in, out := &in.PostStart, &out.PostStart
*out = new(Handler)
*out = new(LifecycleHandler)
(*in).DeepCopyInto(*out)
}
if in.PreStop != nil {
in, out := &in.PreStop, &out.PreStop
*out = new(Handler)
*out = new(LifecycleHandler)
(*in).DeepCopyInto(*out)
}
return
@ -1941,6 +1932,37 @@ func (in *Lifecycle) DeepCopy() *Lifecycle {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) {
*out = *in
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
*out = new(ExecAction)
(*in).DeepCopyInto(*out)
}
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
*out = new(HTTPGetAction)
(*in).DeepCopyInto(*out)
}
if in.TCPSocket != nil {
in, out := &in.TCPSocket, &out.TCPSocket
*out = new(TCPSocketAction)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHandler.
func (in *LifecycleHandler) DeepCopy() *LifecycleHandler {
if in == nil {
return nil
}
out := new(LifecycleHandler)
in.DeepCopyInto(out)
return out
}
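
The generated code reflects the API split of the old Handler type: lifecycle hooks now use LifecycleHandler, while probes use the ProbeHandler shown further below, which additionally carries GRPC. A sketch of how the two are populated (values illustrative):

lc := &core.Lifecycle{
	PreStop: &core.LifecycleHandler{
		Exec: &core.ExecAction{Command: []string{"sh", "-c", "sleep 5"}},
	},
}
probe := &core.Probe{
	ProbeHandler: core.ProbeHandler{GRPC: &core.GRPCAction{Port: 9090}},
}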

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRange) DeepCopyInto(out *LimitRange) {
*out = *in
@ -2976,6 +2998,18 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AllocatedResources != nil {
in, out := &in.AllocatedResources, &out.AllocatedResources
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.ResizeStatus != nil {
in, out := &in.ResizeStatus, &out.ResizeStatus
*out = new(PersistentVolumeClaimResizeStatus)
**out = **in
}
return
}

@ -3602,6 +3636,22 @@ func (in *PodLogOptions) DeepCopyObject() runtime.Object {
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodOS) DeepCopyInto(out *PodOS) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOS.
func (in *PodOS) DeepCopy() *PodOS {
if in == nil {
return nil
}
out := new(PodOS)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
*out = *in
@ -3896,6 +3946,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OS != nil {
in, out := &in.OS, &out.OS
*out = new(PodOS)
**out = **in
}
return
}

@ -4163,7 +4218,7 @@ func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Probe) DeepCopyInto(out *Probe) {
*out = *in
in.Handler.DeepCopyInto(&out.Handler)
in.ProbeHandler.DeepCopyInto(&out.ProbeHandler)
if in.TerminationGracePeriodSeconds != nil {
in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
*out = new(int64)
@ -4182,6 +4237,42 @@ func (in *Probe) DeepCopy() *Probe {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProbeHandler) DeepCopyInto(out *ProbeHandler) {
*out = *in
if in.Exec != nil {
in, out := &in.Exec, &out.Exec
*out = new(ExecAction)
(*in).DeepCopyInto(*out)
}
if in.HTTPGet != nil {
in, out := &in.HTTPGet, &out.HTTPGet
*out = new(HTTPGetAction)
(*in).DeepCopyInto(*out)
}
if in.TCPSocket != nil {
in, out := &in.TCPSocket, &out.TCPSocket
*out = new(TCPSocketAction)
**out = **in
}
if in.GRPC != nil {
in, out := &in.GRPC, &out.GRPC
*out = new(GRPCAction)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeHandler.
func (in *ProbeHandler) DeepCopy() *ProbeHandler {
if in == nil {
return nil
}
out := new(ProbeHandler)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
*out = *in

1
vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

3
vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
generated
vendored
@ -339,15 +339,12 @@ type IngressClassParametersReference struct {
Name string
// Scope represents if this refers to a cluster or namespace scoped resource.
// This may be set to "Cluster" (default) or "Namespace".
// Field can be enabled with IngressClassNamespacedParams feature gate.
// +optional
// +featureGate=IngressClassNamespacedParams
Scope *string
// Namespace is the namespace of the resource being referenced. This field is
// required when scope is set to "Namespace" and must be unset when scope is set to
// "Cluster".
// +optional
// +featureGate=IngressClassNamespacedParams
Namespace *string
}

1
vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

1
vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

1
vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*

169
vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package controller

import (
"context"
"encoding/json"
"fmt"
"sync"
@ -38,13 +39,13 @@ type BaseControllerRefManager struct {

canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func() error
CanAdoptFunc func(ctx context.Context) error
}

func (m *BaseControllerRefManager) CanAdopt() error {
func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc()
m.canAdoptErr = m.CanAdoptFunc(ctx)
}
})
return m.canAdoptErr
@ -65,7 +66,7 @@ func (m *BaseControllerRefManager) CanAdopt() error {
// own the object.
//
// No reconciliation will be attempted if the controller is being deleted.
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
func (m *BaseControllerRefManager) ClaimObject(ctx context.Context, obj metav1.Object, match func(metav1.Object) bool, adopt, release func(context.Context, metav1.Object) error) (bool, error) {
controllerRef := metav1.GetControllerOfNoCopy(obj)
if controllerRef != nil {
if controllerRef.UID != m.Controller.GetUID() {
@ -84,7 +85,7 @@ func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(met
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(obj); err != nil {
if err := release(ctx, obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
@ -106,8 +107,14 @@ func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(met
// Ignore if the object is being deleted
return false, nil
}

if len(m.Controller.GetNamespace()) > 0 && m.Controller.GetNamespace() != obj.GetNamespace() {
// Ignore if the namespace does not match
return false, nil
}

// Selector matches. Try to adopt.
if err := adopt(obj); err != nil {
if err := adopt(ctx, obj); err != nil {
// If the pod no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
@ -143,7 +150,7 @@ func NewPodControllerRefManager(
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
canAdopt func(ctx context.Context) error,
finalizers ...string,
) *PodControllerRefManager {
return &PodControllerRefManager{
@ -173,7 +180,7 @@ func NewPodControllerRefManager(
//
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of Pods that you now own is returned.
func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
func (m *PodControllerRefManager) ClaimPods(ctx context.Context, pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) {
var claimed []*v1.Pod
var errlist []error

@ -190,15 +197,15 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.
}
return true
}
adopt := func(obj metav1.Object) error {
return m.AdoptPod(obj.(*v1.Pod))
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptPod(ctx, obj.(*v1.Pod))
}
release := func(obj metav1.Object) error {
return m.ReleasePod(obj.(*v1.Pod))
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleasePod(ctx, obj.(*v1.Pod))
}

for _, pod := range pods {
ok, err := m.ClaimObject(pod, match, adopt, release)
ok, err := m.ClaimObject(ctx, pod, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@ -212,8 +219,8 @@ func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.

// AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails.
func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
if err := m.CanAdopt(); err != nil {
func (m *PodControllerRefManager) AdoptPod(ctx context.Context, pod *v1.Pod) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@ -223,19 +230,19 @@ func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
if err != nil {
return err
}
return m.podControl.PatchPod(pod.Namespace, pod.Name, patchBytes)
return m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes)
}

// ReleasePod sends a patch to free the pod from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
func (m *PodControllerRefManager) ReleasePod(ctx context.Context, pod *v1.Pod) error {
klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
patchBytes, err := deleteOwnerRefStrategicMergePatch(pod.UID, m.Controller.GetUID(), m.finalizers...)
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(pod.UID, []types.UID{m.Controller.GetUID()}, m.finalizers...)
if err != nil {
return err
}
err = m.podControl.PatchPod(pod.Namespace, pod.Name, patchBytes)
err = m.podControl.PatchPod(ctx, pod.Namespace, pod.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the pod no longer exists, ignore it.
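
With the context plumbed through, a controller's claim path now looks roughly like this (rsc, rs, selector, controllerKind and canAdoptFunc are illustrative stand-ins for a real controller's fields):

cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, controllerKind, canAdoptFunc)
claimedPods, err := cm.ClaimPods(ctx, pods)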
@ -283,7 +290,7 @@ func NewReplicaSetControllerRefManager(
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
canAdopt func(ctx context.Context) error,
) *ReplicaSetControllerRefManager {
return &ReplicaSetControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
@ -309,22 +316,22 @@ func NewReplicaSetControllerRefManager(
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ReplicaSets that you now own is
// returned.
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(ctx context.Context, sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) {
var claimed []*apps.ReplicaSet
var errlist []error

match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptReplicaSet(obj.(*apps.ReplicaSet))
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptReplicaSet(ctx, obj.(*apps.ReplicaSet))
}
release := func(obj metav1.Object) error {
return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet))
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleaseReplicaSet(ctx, obj.(*apps.ReplicaSet))
}

for _, rs := range sets {
ok, err := m.ClaimObject(rs, match, adopt, release)
ok, err := m.ClaimObject(ctx, rs, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@ -338,8 +345,8 @@ func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSe

// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns
// the error if the patching fails.
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error {
if err := m.CanAdopt(); err != nil {
func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(ctx context.Context, rs *apps.ReplicaSet) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@ -348,19 +355,19 @@ func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) er
if err != nil {
return err
}
return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, patchBytes)
return m.rsControl.PatchReplicaSet(ctx, rs.Namespace, rs.Name, patchBytes)
}

// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error {
func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(ctx context.Context, replicaSet *apps.ReplicaSet) error {
klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s",
replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
patchBytes, err := deleteOwnerRefStrategicMergePatch(replicaSet.UID, m.Controller.GetUID())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(replicaSet.UID, []types.UID{m.Controller.GetUID()})
if err != nil {
return err
}
err = m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, patchBytes)
err = m.rsControl.PatchReplicaSet(ctx, replicaSet.Namespace, replicaSet.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the ReplicaSet no longer exists, ignore it.
@ -381,9 +388,9 @@ func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.Repl
//
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
return func() error {
obj, err := getObject()
func RecheckDeletionTimestamp(getObject func(context.Context) (metav1.Object, error)) func(context.Context) error {
return func(ctx context.Context) error {
obj, err := getObject(ctx)
if err != nil {
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
}
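
A typical wiring of the now context-aware adoption recheck, sketched with an assumed clientset and namespace/name:

canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
	fresh, err := client.AppsV1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	return fresh, nil
})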
@ -421,7 +428,7 @@ func NewControllerRevisionControllerRefManager(
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
canAdopt func(ctx context.Context) error,
) *ControllerRevisionControllerRefManager {
return &ControllerRevisionControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
@ -447,22 +454,22 @@ func NewControllerRevisionControllerRefManager(
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(ctx context.Context, histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
var claimed []*apps.ControllerRevision
var errlist []error

match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
adopt := func(ctx context.Context, obj metav1.Object) error {
return m.AdoptControllerRevision(ctx, obj.(*apps.ControllerRevision))
}
release := func(obj metav1.Object) error {
return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
release := func(ctx context.Context, obj metav1.Object) error {
return m.ReleaseControllerRevision(ctx, obj.(*apps.ControllerRevision))
}

for _, h := range histories {
ok, err := m.ClaimObject(h, match, adopt, release)
ok, err := m.ClaimObject(ctx, h, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
@ -476,8 +483,8 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor

// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
if err := m.CanAdopt(); err != nil {
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
if err := m.CanAdopt(ctx); err != nil {
return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
@ -486,20 +493,20 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history
if err != nil {
return err
}
return m.crControl.PatchControllerRevision(history.Namespace, history.Name, patchBytes)
return m.crControl.PatchControllerRevision(ctx, history.Namespace, history.Name, patchBytes)
}

// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(ctx context.Context, history *apps.ControllerRevision) error {
klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
patchBytes, err := deleteOwnerRefStrategicMergePatch(history.UID, m.Controller.GetUID())
patchBytes, err := GenerateDeleteOwnerRefStrategicMergeBytes(history.UID, []types.UID{m.Controller.GetUID()})
if err != nil {
return err
}

err = m.crControl.PatchControllerRevision(history.Namespace, history.Name, patchBytes)
err = m.crControl.PatchControllerRevision(ctx, history.Namespace, history.Name, patchBytes)
if err != nil {
if errors.IsNotFound(err) {
// If the ControllerRevision no longer exists, ignore it.
@ -516,36 +523,6 @@ func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(histo
return err
}

type objectForDeleteOwnerRefStrategicMergePatch struct {
Metadata objectMetaForMergePatch `json:"metadata"`
}

type objectMetaForMergePatch struct {
UID types.UID `json:"uid"`
OwnerReferences []map[string]string `json:"ownerReferences"`
DeleteFinalizers []string `json:"$deleteFromPrimitiveList/finalizers,omitempty"`
}

func deleteOwnerRefStrategicMergePatch(dependentUID types.UID, ownerUID types.UID, finalizers ...string) ([]byte, error) {
patch := objectForDeleteOwnerRefStrategicMergePatch{
Metadata: objectMetaForMergePatch{
UID: dependentUID,
OwnerReferences: []map[string]string{
{
"$patch": "delete",
"uid": string(ownerUID),
},
},
DeleteFinalizers: finalizers,
},
}
patchBytes, err := json.Marshal(&patch)
if err != nil {
return nil, err
}
return patchBytes, nil
}

type objectForAddOwnerRefPatch struct {
Metadata objectMetaForPatch `json:"metadata"`
}
@ -581,3 +558,39 @@ func ownerRefControllerPatch(controller metav1.Object, controllerKind schema.Gro
}
return patchBytes, nil
}

type objectForDeleteOwnerRefStrategicMergePatch struct {
Metadata objectMetaForMergePatch `json:"metadata"`
}

type objectMetaForMergePatch struct {
UID types.UID `json:"uid"`
OwnerReferences []map[string]string `json:"ownerReferences"`
DeleteFinalizers []string `json:"$deleteFromPrimitiveList/finalizers,omitempty"`
}

func GenerateDeleteOwnerRefStrategicMergeBytes(dependentUID types.UID, ownerUIDs []types.UID, finalizers ...string) ([]byte, error) {
var ownerReferences []map[string]string
for _, ownerUID := range ownerUIDs {
ownerReferences = append(ownerReferences, ownerReference(ownerUID, "delete"))
}
patch := objectForDeleteOwnerRefStrategicMergePatch{
Metadata: objectMetaForMergePatch{
UID: dependentUID,
OwnerReferences: ownerReferences,
DeleteFinalizers: finalizers,
},
}
patchBytes, err := json.Marshal(&patch)
if err != nil {
return nil, err
}
return patchBytes, nil
}

func ownerReference(uid types.UID, patchType string) map[string]string {
return map[string]string{
"$patch": patchType,
"uid": string(uid),
}
}
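
For a dependent object with one owner UID and a finalizer, the exported helper marshals a strategic-merge patch matching the struct tags above; sketch with illustrative UIDs:

patch, err := controller.GenerateDeleteOwnerRefStrategicMergeBytes("dep-uid", []types.UID{"owner-uid"}, "example.com/finalizer")
// patch is roughly:
// {"metadata":{"uid":"dep-uid",
//   "ownerReferences":[{"$patch":"delete","uid":"owner-uid"}],
//   "$deleteFromPrimitiveList/finalizers":["example.com/finalizer"]}}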

90
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
generated
vendored
@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@ -52,6 +51,7 @@ import (
"k8s.io/kubernetes/pkg/features"
hashutil "k8s.io/kubernetes/pkg/util/hash"
taintutils "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/utils/clock"
"k8s.io/utils/integer"

"k8s.io/klog/v2"
@ -406,7 +406,7 @@ const (
// ReplicaSets, as well as increment or decrement them. It is used
// by the deployment controller to ease testing of actions that it takes.
type RSControlInterface interface {
PatchReplicaSet(namespace, name string, data []byte) error
PatchReplicaSet(ctx context.Context, namespace, name string, data []byte) error
}

// RealRSControl is the default implementation of RSControllerInterface.
@ -417,8 +417,8 @@ type RealRSControl struct {

var _ RSControlInterface = &RealRSControl{}

func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error {
_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
func (r RealRSControl) PatchReplicaSet(ctx context.Context, namespace, name string, data []byte) error {
_, err := r.KubeClient.AppsV1().ReplicaSets(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}

@ -427,7 +427,7 @@ func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) erro
// ControllerRevisions, as well as increment or decrement them. It is used
// by the daemonset controller to ease testing of actions that it takes.
type ControllerRevisionControlInterface interface {
PatchControllerRevision(namespace, name string, data []byte) error
PatchControllerRevision(ctx context.Context, namespace, name string, data []byte) error
}

// RealControllerRevisionControl is the default implementation of ControllerRevisionControlInterface.
@ -437,8 +437,8 @@ type RealControllerRevisionControl struct {

var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{}

func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error {
_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
func (r RealControllerRevisionControl) PatchControllerRevision(ctx context.Context, namespace, name string, data []byte) error {
_, err := r.KubeClient.AppsV1().ControllerRevisions(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}

@ -446,13 +446,13 @@ func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name s
// created as an interface to allow testing.
type PodControlInterface interface {
// CreatePods creates new pods according to the spec, and sets object as the pod's controller.
CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error
CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error
// CreatePodsWithGenerateName creates new pods according to the spec, sets object as the pod's controller and sets pod's generateName.
CreatePodsWithGenerateName(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error
CreatePodsWithGenerateName(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error
// DeletePod deletes the pod identified by podID.
DeletePod(namespace string, podID string, object runtime.Object) error
DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error
// PatchPod patches the pod.
PatchPod(namespace, name string, data []byte) error
PatchPod(ctx context.Context, namespace, name string, data []byte) error
}
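
The signature change ripples into every implementation of this interface, including the FakePodControl below, which records calls instead of hitting the API server; e.g. (template/object/ref are illustrative):

fake := &controller.FakePodControl{}
err := fake.CreatePods(context.TODO(), "default", podTemplate, owningObject, controllerRef)
// fake.Templates and fake.ControllerRefs now record what would have been created.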

// RealPodControl is the default implementation of PodControlInterface.
@ -513,11 +513,11 @@ func validateControllerRef(controllerRef *metav1.OwnerReference) error {
return nil
}

func (r RealPodControl) CreatePods(namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error {
return r.CreatePodsWithGenerateName(namespace, template, controllerObject, controllerRef, "")
func (r RealPodControl) CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error {
return r.CreatePodsWithGenerateName(ctx, namespace, template, controllerObject, controllerRef, "")
}

func (r RealPodControl) CreatePodsWithGenerateName(namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error {
func (r RealPodControl) CreatePodsWithGenerateName(ctx context.Context, namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference, generateName string) error {
if err := validateControllerRef(controllerRef); err != nil {
return err
}
@ -528,11 +528,11 @@ func (r RealPodControl) CreatePodsWithGenerateName(namespace string, template *v
if len(generateName) > 0 {
pod.ObjectMeta.GenerateName = generateName
}
return r.createPods(namespace, pod, controllerObject)
return r.createPods(ctx, namespace, pod, controllerObject)
}

func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
func (r RealPodControl) PatchPod(ctx context.Context, namespace, name string, data []byte) error {
_, err := r.KubeClient.CoreV1().Pods(namespace).Patch(ctx, name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
return err
}

@ -561,11 +561,11 @@ func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Objec
return pod, nil
}

func (r RealPodControl) createPods(namespace string, pod *v1.Pod, object runtime.Object) error {
func (r RealPodControl) createPods(ctx context.Context, namespace string, pod *v1.Pod, object runtime.Object) error {
if len(labels.Set(pod.Labels)) == 0 {
return fmt.Errorf("unable to create pods, no labels")
}
newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
// only send an event if the namespace isn't terminating
if !apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
@ -584,13 +584,13 @@ func (r RealPodControl) createPods(namespace string, pod *v1.Pod, object runtime
return nil
}

func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
func (r RealPodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
accessor, err := meta.Accessor(object)
if err != nil {
return fmt.Errorf("object does not have ObjectMeta, %v", err)
}
klog.V(2).InfoS("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID))
if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil {
if err := r.KubeClient.CoreV1().Pods(namespace).Delete(ctx, podID, metav1.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID)
return err
@ -616,7 +616,7 @@ type FakePodControl struct {

var _ PodControlInterface = &FakePodControl{}

func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error {
func (f *FakePodControl) PatchPod(ctx context.Context, namespace, name string, data []byte) error {
f.Lock()
defer f.Unlock()
f.Patches = append(f.Patches, data)
@ -626,13 +626,18 @@ func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error {
return nil
}

func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
func (f *FakePodControl) CreatePods(ctx context.Context, namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
return f.CreatePodsWithGenerateName(ctx, namespace, spec, object, controllerRef, "")
}

func (f *FakePodControl) CreatePodsWithGenerateName(ctx context.Context, namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateNamePrefix string) error {
f.Lock()
defer f.Unlock()
f.CreateCallCount++
if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
return fmt.Errorf("not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
}
spec.GenerateName = generateNamePrefix
f.Templates = append(f.Templates, *spec)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
@ -641,22 +646,7 @@ func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec,
return nil
}

func (f *FakePodControl) CreatePodsWithGenerateName(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference, generateNamePrefix string) error {
f.Lock()
defer f.Unlock()
f.CreateCallCount++
if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit {
return fmt.Errorf("not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount)
}
f.Templates = append(f.Templates, *spec)
f.ControllerRefs = append(f.ControllerRefs, *controllerRef)
if f.Err != nil {
return f.Err
}
return nil
}

func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
func (f *FakePodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
f.Lock()
defer f.Unlock()
f.DeletePodName = append(f.DeletePodName, podID)
@ -833,7 +823,7 @@ func (s ActivePodsWithRanks) Less(i, j int) bool {
return !podutil.IsPodReady(s.Pods[i])
}

// 4. higher pod-deletion-cost < lower pod-deletion cost
// 4. lower pod-deletion-cost < higher pod-deletion cost
if utilfeature.DefaultFeatureGate.Enabled(features.PodDeletionCost) {
pi, _ := helper.GetDeletionCostFromPodAnnotations(s.Pods[i].Annotations)
pj, _ := helper.GetDeletionCostFromPodAnnotations(s.Pods[j].Annotations)
@ -1039,7 +1029,7 @@ func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {

// AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls
// to update nodes; otherwise, no API calls. Return error if any.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
@ -1050,10 +1040,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -1074,7 +1064,7 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
return PatchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}

@ -1082,7 +1072,7 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
// won't fail if target taint doesn't exist or has been removed.
// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue
// any API calls.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
@ -1107,10 +1097,10 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -1131,12 +1121,12 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
if !updated {
return nil
}
return PatchNodeTaints(c, nodeName, oldNode, newNode)
return PatchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}

// PatchNodeTaints patches node's taints.
func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
func PatchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
@ -1155,7 +1145,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
}

_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
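
PatchNodeTaints sends a strategic merge patch computed from the old and new node objects; conceptually the flow is (inputs illustrative, using the same strategicpatch helper this file imports):

newNode := oldNode.DeepCopy()
newNode.Spec.Taints = append(newNode.Spec.Taints, v1.Taint{
	Key:    "example.com/maintenance",
	Effect: v1.TaintEffectNoSchedule,
})
oldData, _ := json.Marshal(oldNode)
newData, _ := json.Marshal(newNode)
patchBytes, _ := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
_, err := c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})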

34
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go
generated
vendored
@ -509,40 +509,6 @@ func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}

// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
func GetAllReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, nil, nil, err
}
oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList)
newRS := FindNewReplicaSet(deployment, rsList)
return oldRSes, allOldRSes, newRS, nil
}

// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func GetOldReplicaSets(deployment *apps.Deployment, c appsclient.AppsV1Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, nil, err
}
oldRSes, allOldRSes := FindOldReplicaSets(deployment, rsList)
return oldRSes, allOldRSes, nil
}

// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet(deployment *apps.Deployment, c appsclient.AppsV1Interface) (*apps.ReplicaSet, error) {
rsList, err := ListReplicaSets(deployment, RsListFromClient(c))
if err != nil {
return nil, err
}
return FindNewReplicaSet(deployment, rsList), nil
}
|
||||
|
||||
// RsListFromClient returns an rsListFunc that wraps the given client.
|
||||
func RsListFromClient(c appsclient.AppsV1Interface) RsListFunc {
|
||||
return func(namespace string, options metav1.ListOptions) ([]*apps.ReplicaSet, error) {
|
||||
|
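For callers that depended on the removed client-backed Get*ReplicaSets helpers, the retained primitives compose to the same result. A hedged sketch (the deploymentutil alias, d, and c are assumptions):

    rsList, err := deploymentutil.ListReplicaSets(d, deploymentutil.RsListFromClient(c))
    if err != nil {
        return err
    }
    newRS := deploymentutil.FindNewReplicaSet(d, rsList) // nil if not created yet
    oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(d, rsList)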
318
vendor/k8s.io/kubernetes/pkg/features/kube_features.go
generated
vendored
@ -94,6 +94,7 @@ const (

// owner: @verb
// alpha: v1.16
// beta: v1.23
//
// Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
EphemeralContainers featuregate.Feature = "EphemeralContainers"
@ -132,54 +133,18 @@ const (
// Allows setting memory affinity for a container based on NUMA topology
MemoryManager featuregate.Feature = "MemoryManager"

// owner: @sjenning
// alpha: v1.4
// beta: v1.11
// ga: v1.21
//
// Enable pods to set sysctls on a pod
Sysctls featuregate.Feature = "Sysctls"

// owner: @pospispa
// GA: v1.11
//
// Postpone deletion of a PV or a PVC when they are being used
StorageObjectInUseProtection featuregate.Feature = "StorageObjectInUseProtection"

// owner: @dims, @derekwaynecarr
// alpha: v1.10
// beta: v1.14
// GA: v1.20
//
// Implement support for limiting pids in pods
SupportPodPidsLimit featuregate.Feature = "SupportPodPidsLimit"

// owner: @mikedanese
// alpha: v1.13
// beta: v1.21
// ga: v1.22
//
// Migrate ServiceAccount volumes to use a projected volume consisting of a
// ServiceAccountTokenVolumeProjection. This feature adds new required flags
// to the API server.
BoundServiceAccountTokenVolume featuregate.Feature = "BoundServiceAccountTokenVolume"

// owner: @mtaufen
// alpha: v1.18
// beta: v1.20
// stable: v1.21
//
// Enable OIDC discovery endpoints (issuer and JWKS URLs) for the service
// account issuer in the API server.
// Note these endpoints serve minimally-compliant discovery docs that are
// intended to be used for service account token verification.
ServiceAccountIssuerDiscovery featuregate.Feature = "ServiceAccountIssuerDiscovery"

// owner: @saad-ali
// ga: v1.10
//
// Allow mounting a subpath of a volume in a container
// Do not remove this feature gate even though it's GA
// NOTE: This feature gate has been deprecated and is no longer enforced.
// It will be completely removed in 1.25. Until then, it's still visible in `kubelet --help`
VolumeSubpath featuregate.Feature = "VolumeSubpath"

// owner: @pohly
@ -206,6 +171,7 @@ const (
// owner: @pohly
// alpha: v1.19
// beta: v1.21
// GA: v1.23
//
// Enables generic ephemeral inline volume support for pods
GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"
@ -229,15 +195,6 @@ const (
// Enables RuntimeClass, for selecting between multiple runtimes to run a pod.
RuntimeClass featuregate.Feature = "RuntimeClass"

// owner: @mtaufen
// alpha: v1.12
// beta: v1.14
// GA: v1.17
//
// Kubelet uses the new Lease API to report node heartbeats,
// (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal.
NodeLease featuregate.Feature = "NodeLease"

// owner: @rikatz
// kep: http://kep.k8s.io/2079
// alpha: v1.21
@ -254,6 +211,7 @@ const (

// owner: @janetkuo
// alpha: v1.12
// beta: v1.21
//
// Allow TTL controller to clean up Pods and Jobs after they finish.
TTLAfterFinished featuregate.Feature = "TTLAfterFinished"
@ -267,6 +225,7 @@ const (

// owner: @alculquicondor
// alpha: v1.22
// beta: v1.23
//
// Track Job completion without relying on Pod remaining in the cluster
// indefinitely. Pod finalizers, in addition to a field in the Job status
@ -274,6 +233,12 @@ const (
// yet.
JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers"

// owner: @alculquicondor
// alpha: v1.23
//
// Track the number of pods with Ready condition in the Job status.
JobReadyPods featuregate.Feature = "JobReadyPods"

// owner: @dashpole
// alpha: v1.13
// beta: v1.15
@ -365,9 +330,34 @@ const (
// Disables the OpenStack Cinder in-tree driver.
InTreePluginOpenStackUnregister featuregate.Feature = "InTreePluginOpenStackUnregister"

// owner: @huffmanca
// owner: @trierra
// alpha: v1.23
//
// Enables the Portworx in-tree driver to Portworx migration feature.
CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx"

// owner: @trierra
// alpha: v1.23
//
// Disables the Portworx in-tree driver.
InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister"

// owner: @humblec
// alpha: v1.23
//
// Enables the RBD in-tree driver to RBD CSI Driver migration feature.
CSIMigrationRBD featuregate.Feature = "csiMigrationRBD"

// owner: @humblec
// alpha: v1.23
//
// Disables the RBD in-tree driver.
InTreePluginRBDUnregister featuregate.Feature = "InTreePluginRBDUnregister"

// owner: @huffmanca, @dobsonj
// alpha: v1.19
// beta: v1.20
// GA: v1.23
//
// Determines if a CSI Driver supports applying fsGroup.
CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy"
@ -375,24 +365,19 @@ const (
// owner: @gnufied
// alpha: v1.18
// beta: v1.20
// GA: v1.23
// Allows user to configure volume permission change policy for fsGroups when mounting
// a volume in a Pod.
ConfigurableFSGroupPolicy featuregate.Feature = "ConfigurableFSGroupPolicy"

// owner: @gnufied, @verult
// alpha: v1.22
// beta: v1.23
// If supported by the CSI driver, delegates the role of applying FSGroup to
// the driver by passing FSGroup through the NodeStageVolume and
// NodePublishVolume calls.
DelegateFSGroupToCSIDriver featuregate.Feature = "DelegateFSGroupToCSIDriver"

// owner: @RobertKrawitz, @derekwaynecarr
// beta: v1.15
// GA: v1.20
//
// Implement support for limiting pids in nodes
SupportNodePidsLimit featuregate.Feature = "SupportNodePidsLimit"

// owner: @RobertKrawitz
// alpha: v1.15
//
@ -449,21 +434,6 @@ const (
// Enable Endpoint Slice consumption by kube-proxy in Windows for improved scalability.
WindowsEndpointSliceProxying featuregate.Feature = "WindowsEndpointSliceProxying"

// owner: @matthyx
// alpha: v1.16
// beta: v1.18
// GA: v1.20
//
// Enables the startupProbe in kubelet worker.
StartupProbe featuregate.Feature = "StartupProbe"

// owner: @deads2k
// beta: v1.17
// GA: v1.21
//
// Enables the users to skip TLS verification of kubelets on pod logs requests
AllowInsecureBackendProxy featuregate.Feature = "AllowInsecureBackendProxy"

// owner: @mortent
// alpha: v1.3
// beta: v1.5
@ -471,15 +441,6 @@ const (
// Enable all logic related to the PodDisruptionBudget API object in policy
PodDisruptionBudget featuregate.Feature = "PodDisruptionBudget"

// owner: @alaypatel07, @soltysh
// alpha: v1.20
// beta: v1.21
// GA: v1.22
//
// CronJobControllerV2 controls whether the controller manager starts old cronjob
// controller or new one which is implemented with informers and delaying queue
CronJobControllerV2 featuregate.Feature = "CronJobControllerV2"

// owner: @smarterclayton
// alpha: v1.21
// beta: v1.22
@ -603,6 +564,12 @@ const (
// Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag.
DisableCloudProviders featuregate.Feature = "DisableCloudProviders"

// owner: @andrewsykim
// alpha: v1.23
//
// Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials.
DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders"

// owner: @zshihang
// alpha: v1.20
// beta: v1.21
@ -618,6 +585,12 @@ const (
// Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown.
GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown"

// owner: @wzshiming
// alpha: v1.23
//
// Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown.
GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority"

// owner: @andrewsykim @uablrek
// kep: http://kep.k8s.io/1864
// alpha: v1.20
@ -637,6 +610,12 @@ const (
// alpha: v1.21
VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority"

// owner: @mattcary
// alpha: v1.22
//
// Enables policies controlling deletion of PVCs created by a StatefulSet.
StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC"

// owner: @ahg-g
// alpha: v1.21
// beta: v1.22
@ -647,6 +626,7 @@ const (
// owner: @robscott
// kep: http://kep.k8s.io/2433
// alpha: v1.21
// beta: v1.23
//
// Enables topology aware hints for EndpointSlices
TopologyAwareHints featuregate.Feature = "TopologyAwareHints"
@ -689,6 +669,7 @@ const (
// kep: http://kep.k8s.io/2365
// alpha: v1.21
// beta: v1.22
// GA: v1.23
//
// Enable Scope and Namespace fields on IngressClassParametersReference.
IngressClassNamespacedParams featuregate.Feature = "IngressClassNamespacedParams"
@ -696,6 +677,7 @@ const (
// owner: @maplain @andrewsykim
// kep: http://kep.k8s.io/2086
// alpha: v1.21
// beta: v1.22
//
// Enables node-local routing for Service internal traffic
ServiceInternalTrafficPolicy featuregate.Feature = "ServiceInternalTrafficPolicy"
@ -709,7 +691,7 @@ const (

// owner: @fromanirh
// alpha: v1.21
//
// beta: v1.23
// Enable POD resources API to return allocatable resources
KubeletPodResourcesGetAllocatable featuregate.Feature = "KubeletPodResourcesGetAllocatable"

@ -729,16 +711,25 @@ const (
// owner: @marosset
// alpha: v1.22
// beta: v1.23
//
// Enables support for 'HostProcess' containers on Windows nodes.
WindowsHostProcessContainers featuregate.Feature = "WindowsHostProcessContainers"

// owner: @ravig
// kep: https://kep.k8s.io/2607
// alpha: v1.22
//
// beta: v1.23
// StatefulSetMinReadySeconds allows minReadySeconds to be respected by StatefulSet controller
StatefulSetMinReadySeconds featuregate.Feature = "StatefulSetMinReadySeconds"

// owner: @ravig
// alpha: v1.23
//
// IdentifyPodOS allows user to specify OS on which they'd like the Pod run. The user should still set the nodeSelector
// with appropriate `kubernetes.io/os` label for scheduler to identify appropriate node for the pod to run.
IdentifyPodOS featuregate.Feature = "IdentifyPodOS"

// owner: @gjkim42
// kep: http://kep.k8s.io/2595
// alpha: v1.22
@ -754,6 +745,7 @@ const (

// owner: @liggitt, @tallclair, sig-auth
// alpha: v1.22
// beta: v1.23
//
// Enables the PodSecurity admission plugin
PodSecurity featuregate.Feature = "PodSecurity"
@ -787,8 +779,9 @@ const (

// owner: @fromanirh
// alpha: v1.22
// beta: v1.23
//
// Allow fine-tuning of cpumanager policies
// Allow the usage of options to fine-tune the cpumanager policies.
CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions"

// owner: @jiahuif
@ -797,6 +790,71 @@ const (
//
// Enables Leader Migration for kube-controller-manager and cloud-controller-manager
ControllerManagerLeaderMigration featuregate.Feature = "ControllerManagerLeaderMigration"

// owner: @fromanirh
// alpha: v1.23
// beta: see below.
//
// Allow fine-tuning of cpumanager policies, experimental, alpha-quality options
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// We want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of cpumanager options whose quality level is alpha.
// - will never graduate to beta or stable.
// See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// for details about the removal of this feature gate.
CPUManagerPolicyAlphaOptions featuregate.Feature = "CPUManagerPolicyAlphaOptions"

// owner: @fromanirh
// beta: v1.23
// beta: see below.
//
// Allow fine-tuning of cpumanager policies, experimental, beta-quality options
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// We want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of cpumanager options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
// See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// for details about the removal of this feature gate.
CPUManagerPolicyBetaOptions featuregate.Feature = "CPUManagerPolicyBetaOptions"

// owner: @ahg
// beta: v1.23
//
// Allow updating node scheduling directives in the pod template of jobs. Specifically,
// node affinity, selector and tolerations. This is allowed only for suspended jobs
// that have never been unsuspended before.
JobMutableNodeSchedulingDirectives featuregate.Feature = "JobMutableNodeSchedulingDirectives"

// owner: @haircommander
// kep: http://kep.k8s.io/2364
// alpha: v1.23
//
// Configures the Kubelet to use the CRI to populate pod and container stats, instead of supplimenting with stats from cAdvisor.
// Requires the CRI implementation supports supplying the required stats.
PodAndContainerStatsFromCRI featuregate.Feature = "PodAndContainerStatsFromCRI"

// owner: @deepakkinni @xing-yang
// kep: http://kep.k8s.io/2680
// alpha: v1.23
//
// Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC
// deletion ordering.
HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"

// owner: @gnufied
// kep: http://kep.k8s.io/1790
// alpha: v1.23
//
// Allow users to recover from volume expansion failure
RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure"

// owner: @yuzhiquan, @bowei, @PxyUp
// kep: http://kep.k8s.io/2727
// alpha: v1.23
//
// Enables GRPC probe method for {Liveness,Readiness,Startup}Probe.
GRPCContainerProbe featuregate.Feature = "GRPCContainerProbe"
)

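The gates declared above are consulted at runtime through the shared feature-gate instance. A minimal sketch, assuming the standard utilfeature helper used throughout Kubernetes (the surrounding controller code is hypothetical):

    import (
        utilfeature "k8s.io/apiserver/pkg/util/feature"
        "k8s.io/kubernetes/pkg/features"
    )

    // Branch on one of the v1.23 gates registered above.
    if utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC) {
        // honor spec.persistentVolumeClaimRetentionPolicy during reconciliation
    }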
func init() {
@ -813,8 +871,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
DevicePlugins: {Default: true, PreRelease: featuregate.Beta},
RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.Beta},
Sysctls: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
EphemeralContainers: {Default: false, PreRelease: featuregate.Alpha},
EphemeralContainers: {Default: true, PreRelease: featuregate.Beta},
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
@ -823,17 +880,13 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
MemoryManager: {Default: true, PreRelease: featuregate.Beta},
CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
TopologyManager: {Default: true, PreRelease: featuregate.Beta},
StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA},
SupportPodPidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
SupportNodePidsLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
BoundServiceAccountTokenVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
ServiceAccountIssuerDiscovery: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
StorageObjectInUseProtection: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
CSIMigration: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationGCE: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires GCE PD CSI Driver)
CSIMigrationGCE: {Default: true, PreRelease: featuregate.Beta}, // On by default in 1.23 (requires GCE PD CSI Driver)
InTreePluginGCEUnregister: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAWS: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires AWS EBS CSI driver)
CSIMigrationAWS: {Default: true, PreRelease: featuregate.Beta},
InTreePluginAWSUnregister: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureDisk: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Azure Disk CSI driver)
CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.Beta}, // On by default in 1.23 (requires Azure Disk CSI driver)
InTreePluginAzureDiskUnregister: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationAzureFile: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Azure File CSI driver)
InTreePluginAzureFileUnregister: {Default: false, PreRelease: featuregate.Alpha},
@ -841,42 +894,43 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
InTreePluginvSphereUnregister: {Default: false, PreRelease: featuregate.Alpha},
CSIMigrationOpenStack: {Default: true, PreRelease: featuregate.Beta},
InTreePluginOpenStackUnregister: {Default: false, PreRelease: featuregate.Alpha},
VolumeSubpath: {Default: true, PreRelease: featuregate.GA},
ConfigurableFSGroupPolicy: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationRBD: {Default: false, PreRelease: featuregate.Alpha}, // Off by default (requires RBD CSI driver)
InTreePluginRBDUnregister: {Default: false, PreRelease: featuregate.Alpha},
ConfigurableFSGroupPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
CSIMigrationPortworx: {Default: false, PreRelease: featuregate.Alpha}, // Off by default (requires Portworx CSI driver)
InTreePluginPortworxUnregister: {Default: false, PreRelease: featuregate.Alpha},
CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta},
CSIStorageCapacity: {Default: true, PreRelease: featuregate.Beta},
CSIServiceAccountToken: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
GenericEphemeralVolume: {Default: true, PreRelease: featuregate.Beta},
CSIVolumeFSGroupPolicy: {Default: true, PreRelease: featuregate.Beta},
GenericEphemeralVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
CSIVolumeFSGroupPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
VolumeSubpath: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
RuntimeClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.Beta},
ProcMountType: {Default: false, PreRelease: featuregate.Alpha},
TTLAfterFinished: {Default: true, PreRelease: featuregate.Beta},
TTLAfterFinished: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
IndexedJob: {Default: true, PreRelease: featuregate.Beta},
JobTrackingWithFinalizers: {Default: false, PreRelease: featuregate.Alpha},
JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.Beta},
JobReadyPods: {Default: false, PreRelease: featuregate.Alpha},
KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
NonPreemptingPriority: {Default: true, PreRelease: featuregate.Beta},
PodOverhead: {Default: true, PreRelease: featuregate.Beta},
IPv6DualStack: {Default: true, PreRelease: featuregate.Beta},
IPv6DualStack: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
EndpointSlice: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
EndpointSliceProxying: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.Beta},
ProxyTerminatingEndpoints: {Default: false, PreRelease: featuregate.Alpha},
EndpointSliceNodeName: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.25
EndpointSliceNodeName: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
WindowsEndpointSliceProxying: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
StartupProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
AllowInsecureBackendProxy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
PodDisruptionBudget: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
CronJobControllerV2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22
ImmutableEphemeralVolumes: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.24
HugePageStorageMediumSize: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
DownwardAPIHugePages: {Default: false, PreRelease: featuregate.Beta}, // on by default in 1.22
HugePageStorageMediumSize: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.24
DownwardAPIHugePages: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22
AnyVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},
DefaultPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
SetHostnameAsFQDN: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.24
SetHostnameAsFQDN: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.24
WinOverlay: {Default: true, PreRelease: featuregate.Beta},
WinDSR: {Default: false, PreRelease: featuregate.Alpha},
DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.Beta},
@ -885,6 +939,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
KubeletCredentialProviders: {Default: false, PreRelease: featuregate.Alpha},
GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta},
GracefulNodeShutdownBasedOnPodPriority: {Default: false, PreRelease: featuregate.Alpha},
ServiceLBNodePortControl: {Default: true, PreRelease: featuregate.Beta},
MixedProtocolLBService: {Default: false, PreRelease: featuregate.Alpha},
VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha},
@ -892,42 +947,55 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
ProbeTerminationGracePeriod: {Default: false, PreRelease: featuregate.Beta}, // Default to false in beta 1.22, set to true in 1.24
NodeSwap: {Default: false, PreRelease: featuregate.Alpha},
PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},
TopologyAwareHints: {Default: false, PreRelease: featuregate.Alpha},
StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha},
TopologyAwareHints: {Default: false, PreRelease: featuregate.Beta},
PodAffinityNamespaceSelector: {Default: true, PreRelease: featuregate.Beta},
ServiceLoadBalancerClass: {Default: true, PreRelease: featuregate.Beta},
IngressClassNamespacedParams: {Default: true, PreRelease: featuregate.Beta},
IngressClassNamespacedParams: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.24
ServiceInternalTrafficPolicy: {Default: true, PreRelease: featuregate.Beta},
LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta},
SuspendJob: {Default: true, PreRelease: featuregate.Beta},
KubeletPodResourcesGetAllocatable: {Default: false, PreRelease: featuregate.Alpha},
KubeletPodResourcesGetAllocatable: {Default: true, PreRelease: featuregate.Beta},
NamespaceDefaultLabelName: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.24
CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},
WindowsHostProcessContainers: {Default: false, PreRelease: featuregate.Alpha},
WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.Beta},
DisableCloudProviders: {Default: false, PreRelease: featuregate.Alpha},
StatefulSetMinReadySeconds: {Default: false, PreRelease: featuregate.Alpha},
DisableKubeletCloudCredentialProviders: {Default: false, PreRelease: featuregate.Alpha},
StatefulSetMinReadySeconds: {Default: true, PreRelease: featuregate.Beta},
ExpandedDNSConfig: {Default: false, PreRelease: featuregate.Alpha},
SeccompDefault: {Default: false, PreRelease: featuregate.Alpha},
PodSecurity: {Default: false, PreRelease: featuregate.Alpha},
PodSecurity: {Default: true, PreRelease: featuregate.Beta},
ReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
CSRDuration: {Default: true, PreRelease: featuregate.Beta},
DelegateFSGroupToCSIDriver: {Default: false, PreRelease: featuregate.Alpha},
DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.Beta},
KubeletInUserNamespace: {Default: false, PreRelease: featuregate.Alpha},
MemoryQoS: {Default: false, PreRelease: featuregate.Alpha},
CPUManagerPolicyOptions: {Default: false, PreRelease: featuregate.Alpha},
CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta},
ControllerManagerLeaderMigration: {Default: true, PreRelease: featuregate.Beta},
CPUManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},
CPUManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta},
JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.Beta},
IdentifyPodOS: {Default: false, PreRelease: featuregate.Alpha},
PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha},
HonorPVReclaimPolicy: {Default: false, PreRelease: featuregate.Alpha},
RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},
GRPCContainerProbe: {Default: false, PreRelease: featuregate.Alpha},

// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
genericfeatures.StreamingProxyRedirects: {Default: false, PreRelease: featuregate.Deprecated}, // remove in 1.24
genericfeatures.ValidateProxyRedirects: {Default: true, PreRelease: featuregate.Deprecated},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},
genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA},
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA},
genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.WarningHeaders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.24
genericfeatures.OpenAPIEnums: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.CustomResourceValidationExpressions: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.OpenAPIV3: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.ServerSideFieldValidation: {Default: false, PreRelease: featuregate.Alpha},

// features that enable backwards compatibility but are scheduled to be removed
// ...
HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
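A map like this is what gets registered into a mutable feature gate at startup. A hedged sketch of the underlying k8s.io/component-base/featuregate mechanics (standalone illustration, not ceph-csi code; the gate name is reused from above):

    gate := featuregate.NewFeatureGate()
    if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        "StatefulSetAutoDeletePVC": {Default: false, PreRelease: featuregate.Alpha},
    }); err != nil {
        panic(err)
    }
    _ = gate.Set("StatefulSetAutoDeletePVC=true") // e.g. parsed from --feature-gates
    fmt.Println(gate.Enabled("StatefulSetAutoDeletePVC")) // true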
28
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
@ -340,7 +340,7 @@ type KubeletConfiguration struct {
ContainerLogMaxFiles int32
// ConfigMapAndSecretChangeDetectionStrategy is a mode in which config map and secret managers are running.
ConfigMapAndSecretChangeDetectionStrategy ResourceChangeDetectionStrategy
// A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in *).
// A comma separated allowlist of unsafe sysctls or sysctl patterns (ending in *).
// Unsafe sysctl groups are kernel.shm*, kernel.msg*, kernel.sem, fs.mqueue.*, and net.*.
// These sysctls are namespaced but not allowed by default. For example: "kernel.msg*,net.ipv4.route.min_pmtu"
// +optional
@ -397,6 +397,15 @@ type KubeletConfiguration struct {
// +featureGate=GracefulNodeShutdown
// +optional
ShutdownGracePeriodCriticalPods metav1.Duration
// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based
// on their associated priority class value.
// When a shutdown request is received, the Kubelet will initiate shutdown on all pods
// running on the node with a grace period that depends on the priority of the pod,
// and then wait for all pods to exit.
// Each entry in the array represents the graceful shutdown time a pod with a priority
// class value that lies in the range of that value and the next higher entry in the
// list when the node is shutting down.
ShutdownGracePeriodByPodPriority []ShutdownGracePeriodByPodPriority
// ReservedMemory specifies a comma-separated list of memory reservations for NUMA nodes.
// The parameter makes sense only in the context of the memory manager feature. The memory manager will not allocate reserved memory for container workloads.
// For example, if you have a NUMA0 with 10Gi of memory and the ReservedMemory was specified to reserve 1Gi of memory at NUMA0,
@ -426,6 +435,15 @@ type KubeletConfiguration struct {
// +featureGate=MemoryQoS
// +optional
MemoryThrottlingFactor *float64
// registerWithTaints are an array of taints to add to a node object when
// the kubelet registers itself. This only takes effect when registerNode
// is true and upon the initial registration of the node.
// +optional
RegisterWithTaints []v1.Taint

// registerNode enables automatic registration with the apiserver.
// +optional
RegisterNode bool
}

// KubeletAuthorizationMode denotes the authorization mode for the kubelet
@ -586,6 +604,14 @@ type MemoryReservation struct {
Limits v1.ResourceList
}

// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based on their associated priority class value
type ShutdownGracePeriodByPodPriority struct {
// priority is the priority value associated with the shutdown grace period
Priority int32
// shutdownGracePeriodSeconds is the shutdown grace period in seconds
ShutdownGracePeriodSeconds int64
}

type MemorySwapConfiguration struct {
// swapBehavior configures swap memory available to container workloads. May be one of
// "", "LimitedSwap": workload combined memory and swap usage cannot exceed pod memory limit
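Each ShutdownGracePeriodByPodPriority entry covers priorities from its own value up to (but not including) the next higher entry. A hedged configuration sketch (values are illustrative, and kubeletconfig is an assumed alias for this package):

    cfg.ShutdownGracePeriodByPodPriority = []kubeletconfig.ShutdownGracePeriodByPodPriority{
        {Priority: 0, ShutdownGracePeriodSeconds: 30},      // priorities 0..99999 get 30s
        {Priority: 100000, ShutdownGracePeriodSeconds: 10}, // priorities >= 100000 get 10s
    }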
31
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
@ -279,9 +280,14 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
*out = make([]string, len(*in))
copy(*out, *in)
}
out.Logging = in.Logging
in.Logging.DeepCopyInto(&out.Logging)
out.ShutdownGracePeriod = in.ShutdownGracePeriod
out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
if in.ShutdownGracePeriodByPodPriority != nil {
in, out := &in.ShutdownGracePeriodByPodPriority, &out.ShutdownGracePeriodByPodPriority
*out = make([]ShutdownGracePeriodByPodPriority, len(*in))
copy(*out, *in)
}
if in.ReservedMemory != nil {
in, out := &in.ReservedMemory, &out.ReservedMemory
*out = make([]MemoryReservation, len(*in))
@ -294,6 +300,13 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
*out = new(float64)
**out = **in
}
if in.RegisterWithTaints != nil {
in, out := &in.RegisterWithTaints, &out.RegisterWithTaints
*out = make([]corev1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

@ -430,3 +443,19 @@ func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShutdownGracePeriodByPodPriority.
func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPodPriority {
if in == nil {
return nil
}
out := new(ShutdownGracePeriodByPodPriority)
in.DeepCopyInto(out)
return out
}
3
vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
generated
vendored
@ -142,6 +142,9 @@ func (sp SyncPodType) String() string {

// IsMirrorPod returns true if the passed Pod is a Mirror Pod.
func IsMirrorPod(pod *v1.Pod) bool {
if pod.Annotations == nil {
return false
}
_, ok := pod.Annotations[ConfigMirrorAnnotationKey]
return ok
}
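The added nil check is defensive and makes the intent explicit; indexing a nil map in Go would also yield ok == false. A small usage sketch (kubetypes is an assumed alias for this package; ConfigMirrorAnnotationKey is defined elsewhere in it):

    pod := &v1.Pod{}
    fmt.Println(kubetypes.IsMirrorPod(pod)) // false: no annotations set

    pod.Annotations = map[string]string{kubetypes.ConfigMirrorAnnotationKey: "hash"}
    fmt.Println(kubetypes.IsMirrorPod(pod)) // true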
55
vendor/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package format

import (
"fmt"
"time"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)

// Pod returns a string representing a pod in a consistent human readable format,
// with pod UID as part of the string.
func Pod(pod *v1.Pod) string {
if pod == nil {
return "<nil>"
}
return PodDesc(pod.Name, pod.Namespace, pod.UID)
}

// PodDesc returns a string representing a pod in a consistent human readable format,
// with pod UID as part of the string.
func PodDesc(podName, podNamespace string, podUID types.UID) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format), while allowed in the container name format.
return fmt.Sprintf("%s_%s(%s)", podName, podNamespace, podUID)
}

// PodWithDeletionTimestamp is the same as Pod. In addition, it prints the
// deletion timestamp of the pod if it's not nil.
func PodWithDeletionTimestamp(pod *v1.Pod) string {
if pod == nil {
return "<nil>"
}
var deletionTimestamp string
if pod.DeletionTimestamp != nil {
deletionTimestamp = ":DeletionTimestamp=" + pod.DeletionTimestamp.UTC().Format(time.RFC3339)
}
return Pod(pod) + deletionTimestamp
}
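A quick sketch of the output format these helpers produce (pod fields are illustrative):

    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
        Name: "web-0", Namespace: "default", UID: types.UID("1234-abcd"),
    }}
    fmt.Println(format.Pod(pod))                      // web-0_default(1234-abcd)
    fmt.Println(format.PodWithDeletionTimestamp(pod)) // same output; no deletion timestamp set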
36
vendor/k8s.io/kubernetes/pkg/kubelet/util/format/resources.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package format

import (
"fmt"
"sort"
"strings"

"k8s.io/api/core/v1"
)

// ResourceList returns a string representation of a resource list in a human readable format.
func ResourceList(resources v1.ResourceList) string {
resourceStrings := make([]string, 0, len(resources))
for key, value := range resources {
resourceStrings = append(resourceStrings, fmt.Sprintf("%v=%v", key, value.String()))
}
// sort the results for consistent log output
sort.Strings(resourceStrings)
return strings.Join(resourceStrings, ",")
}
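A sketch of the resulting string; keys sort alphabetically, so the output is deterministic (values are illustrative, resource is k8s.io/apimachinery/pkg/api/resource):

    rl := v1.ResourceList{
        v1.ResourceMemory: resource.MustParse("128Mi"),
        v1.ResourceCPU:    resource.MustParse("100m"),
    }
    fmt.Println(format.ResourceList(rl)) // cpu=100m,memory=128Mi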
24
vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go
generated
vendored
@ -17,31 +17,31 @@ limitations under the License.
package util

import (
"fmt"
"net"
"strconv"

"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)

// IPPart returns just the IP part of an IP or IP:port or endpoint string. If the IP
// part is an IPv6 address enclosed in brackets (e.g. "[fd00:1::5]:9999"),
// then the brackets are stripped as well.
func IPPart(s string) string {
if ip := net.ParseIP(s); ip != nil {
if ip := netutils.ParseIPSloppy(s); ip != nil {
// IP address without port
return s
}
// Must be IP:port
host, _, err := net.SplitHostPort(s)
if err != nil {
klog.Errorf("Error parsing '%s': %v", s, err)
klog.ErrorS(err, "Failed to parse host-port", "input", s)
return ""
}
// Check if host string is a valid IP address
ip := net.ParseIP(host)
ip := netutils.ParseIPSloppy(host)
if ip == nil {
klog.Errorf("invalid IP part '%s'", host)
klog.ErrorS(nil, "Failed to parse IP", "input", host)
return ""
}
return ip.String()
@ -52,23 +52,13 @@ func PortPart(s string) (int, error) {
// Must be IP:port
_, port, err := net.SplitHostPort(s)
if err != nil {
klog.Errorf("Error parsing '%s': %v", s, err)
klog.ErrorS(err, "Failed to parse host-port", "input", s)
return -1, err
}
portNumber, err := strconv.Atoi(port)
if err != nil {
klog.Errorf("Error parsing '%s': %v", port, err)
klog.ErrorS(err, "Failed to parse port", "input", port)
return -1, err
}
return portNumber, nil
}

// ToCIDR returns a host address of the form <ip-address>/32 for
// IPv4 and <ip-address>/128 for IPv6
func ToCIDR(ip net.IP) string {
len := 32
if ip.To4() == nil {
len = 128
}
return fmt.Sprintf("%s/%d", ip.String(), len)
}
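A behavior sketch for the parsing helpers above (proxyutil is an assumed alias for this package):

    fmt.Println(proxyutil.IPPart("[fd00:1::5]:9999")) // fd00:1::5 (brackets stripped)
    fmt.Println(proxyutil.IPPart("10.0.0.1"))         // 10.0.0.1 (no port present)
    port, _ := proxyutil.PortPart("[fd00:1::5]:9999")
    fmt.Println(port) // 9999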
136
vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
generated
vendored
@ -24,15 +24,16 @@ import (
"net"
"net/http"
"strconv"
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/events"
utilsysctl "k8s.io/component-helpers/node/util/sysctl"
helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilnet "k8s.io/utils/net"
netutils "k8s.io/utils/net"

"k8s.io/klog/v2"
)
@ -50,7 +51,7 @@ var (
ErrAddressNotAllowed = errors.New("address not allowed")

// ErrNoAddresses indicates there are no addresses for the hostname
ErrNoAddresses = errors.New("No addresses for hostname")
ErrNoAddresses = errors.New("no addresses for hostname")
)

// isValidEndpoint checks that the given host / port pair are valid endpoint
@ -88,7 +89,7 @@ func IsZeroCIDR(cidr string) bool {

// IsProxyableIP checks if a given IP address is permitted to be proxied
func IsProxyableIP(ip string) error {
netIP := net.ParseIP(ip)
netIP := netutils.ParseIPSloppy(ip)
if netIP == nil {
return ErrAddressNotAllowed
}
@ -146,7 +147,7 @@ func GetLocalAddrs() ([]net.IP, error) {
}

for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
ip, _, err := netutils.ParseCIDRSloppy(addr.String())
if err != nil {
return nil, err
}
@ -159,7 +160,7 @@ func GetLocalAddrs() ([]net.IP, error) {

// GetLocalAddrSet return a local IPSet.
// If failed to get local addr, will assume no local ips.
func GetLocalAddrSet() utilnet.IPSet {
func GetLocalAddrSet() netutils.IPSet {
localAddrs, err := GetLocalAddrs()
if err != nil {
klog.ErrorS(err, "Failed to get local addresses assuming no local IPs")
@ -167,7 +168,7 @@ func GetLocalAddrSet() utilnet.IPSet {
klog.InfoS("No local addresses were found")
}

localAddrSet := utilnet.IPSet{}
localAddrSet := netutils.IPSet{}
localAddrSet.Insert(localAddrs...)
return localAddrSet
}
@ -176,12 +177,12 @@ func GetLocalAddrSet() utilnet.IPSet {
func ShouldSkipService(service *v1.Service) bool {
// if ClusterIP is "None" or empty, skip proxying
if !helper.IsServiceIPSet(service) {
klog.V(3).Infof("Skipping service %s in namespace %s due to clusterIP = %q", service.Name, service.Namespace, service.Spec.ClusterIP)
klog.V(3).InfoS("Skipping service due to cluster IP", "service", klog.KObj(service), "clusterIP", service.Spec.ClusterIP)
return true
}
// Even if ClusterIP is set, ServiceTypeExternalName services don't get proxied
if service.Spec.Type == v1.ServiceTypeExternalName {
klog.V(3).Infof("Skipping service %s in namespace %s due to Type=ExternalName", service.Name, service.Namespace)
klog.V(3).InfoS("Skipping service due to Type=ExternalName", "service", klog.KObj(service))
return true
}
return false
@ -220,7 +221,7 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
continue
}

_, ipNet, _ := net.ParseCIDR(cidr)
_, ipNet, _ := netutils.ParseCIDRSloppy(cidr)
for _, addr := range addrs {
var ip net.IP
// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux.
@ -234,10 +235,10 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
}

if ipNet.Contains(ip) {
if utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !utilnet.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
@ -251,10 +252,31 @@ func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error)
return uniqueAddressList, nil
}
// AddressSet validates the addresses in the slice using the "isValid" function.
// Addresses that pass the validation are returned as a string Set.
func AddressSet(isValid func(ip net.IP) bool, addrs []net.Addr) sets.String {
ips := sets.NewString()
for _, a := range addrs {
var ip net.IP
switch v := a.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
default:
continue
}
if isValid(ip) {
ips.Insert(ip.String())
}
}
return ips
}

// LogAndEmitIncorrectIPVersionEvent logs and emits incorrect IP version event.
func LogAndEmitIncorrectIPVersionEvent(recorder events.EventRecorder, fieldName, fieldValue, svcNamespace, svcName string, svcUID types.UID) {
errMsg := fmt.Sprintf("%s in %s has incorrect IP version", fieldValue, fieldName)
klog.Errorf("%s (service %s/%s).", errMsg, svcNamespace, svcName)
klog.ErrorS(nil, "Incorrect IP version", "service", klog.KRef(svcNamespace, svcName), "field", fieldName, "value", fieldValue)
if recorder != nil {
recorder.Eventf(
&v1.ObjectReference{
@ -274,7 +296,15 @@ func MapIPsByIPFamily(ipStrings []string) map[v1.IPFamily][]string {
if ipFamily, err := getIPFamilyFromIP(ip); err == nil {
ipFamilyMap[ipFamily] = append(ipFamilyMap[ipFamily], ip)
} else {
klog.Errorf("Skipping invalid IP: %s", ip)
// this function is called in multiple places. All of which
// have sanitized data. Except the case of ExternalIPs which is
// not validated by api-server. Specifically empty strings
// validation. Which yields into a lot of bad error logs.
// check for empty string
if len(strings.TrimSpace(ip)) != 0 {
klog.ErrorS(nil, "Skipping invalid IP", "ip", ip)

}
}
}
return ipFamilyMap
@ -288,30 +318,30 @@ func MapCIDRsByIPFamily(cidrStrings []string) map[v1.IPFamily][]string {
if ipFamily, err := getIPFamilyFromCIDR(cidr); err == nil {
ipFamilyMap[ipFamily] = append(ipFamilyMap[ipFamily], cidr)
} else {
klog.Errorf("Skipping invalid cidr: %s", cidr)
klog.ErrorS(nil, "Skipping invalid CIDR", "cidr", cidr)
}
}
return ipFamilyMap
}

func getIPFamilyFromIP(ipStr string) (v1.IPFamily, error) {
netIP := net.ParseIP(ipStr)
netIP := netutils.ParseIPSloppy(ipStr)
if netIP == nil {
return "", ErrAddressNotAllowed
}

if utilnet.IsIPv6(netIP) {
if netutils.IsIPv6(netIP) {
return v1.IPv6Protocol, nil
}
return v1.IPv4Protocol, nil
}

func getIPFamilyFromCIDR(cidrStr string) (v1.IPFamily, error) {
_, netCIDR, err := net.ParseCIDR(cidrStr)
_, netCIDR, err := netutils.ParseCIDRSloppy(cidrStr)
if err != nil {
return "", ErrAddressNotAllowed
}
if utilnet.IsIPv6CIDR(netCIDR) {
if netutils.IsIPv6CIDR(netCIDR) {
return v1.IPv6Protocol, nil
}
return v1.IPv4Protocol, nil
@ -335,7 +365,7 @@ func AppendPortIfNeeded(addr string, port int32) string {
}

// Simply return for invalid case. This should be caught by validation instead.
ip := net.ParseIP(addr)
ip := netutils.ParseIPSloppy(addr)
if ip == nil {
return addr
}
@ -367,7 +397,7 @@ func EnsureSysctl(sysctl utilsysctl.Interface, name string, newVal int) error {
if err := sysctl.SetSysctl(name, newVal); err != nil {
return fmt.Errorf("can't set sysctl %s to %d: %v", name, newVal, err)
}
klog.V(1).Infof("Changed sysctl %q: %d -> %d", name, oldVal, newVal)
klog.V(1).InfoS("Changed sysctl", "name", name, "before", oldVal, "after", newVal)
}
return nil
}
@ -441,7 +471,7 @@ func GetClusterIPByFamily(ipFamily v1.IPFamily, service *v1.Service) string {
}

IsIPv6Family := (ipFamily == v1.IPv6Protocol)
if IsIPv6Family == utilnet.IsIPv6String(service.Spec.ClusterIP) {
if IsIPv6Family == netutils.IsIPv6String(service.Spec.ClusterIP) {
return service.Spec.ClusterIP
}

@ -459,44 +489,56 @@ func GetClusterIPByFamily(ipFamily v1.IPFamily, service *v1.Service) string {
return ""
}
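A sketch of the family-split helper above on mixed input, showing the new empty-string handling (aliases assumed as in the previous example):

    m := proxyutil.MapIPsByIPFamily([]string{"10.0.0.1", "fd00::1", ""})
    fmt.Println(m[v1.IPv4Protocol]) // [10.0.0.1]
    fmt.Println(m[v1.IPv6Protocol]) // [fd00::1]; the empty string is dropped without an error log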
// WriteLine join all words with spaces, terminate with newline and write to buff.
|
||||
func WriteLine(buf *bytes.Buffer, words ...string) {
|
||||
// We avoid strings.Join for performance reasons.
|
||||
for i := range words {
|
||||
buf.WriteString(words[i])
|
||||
if i < len(words)-1 {
|
||||
buf.WriteByte(' ')
|
||||
} else {
|
||||
buf.WriteByte('\n')
|
||||
type LineBuffer struct {
|
||||
b bytes.Buffer
|
||||
}
|
||||
|
||||
// Write takes a list of arguments, each a string or []string, joins all the
|
||||
// individual strings with spaces, terminates with newline, and writes to buf.
|
||||
// Any other argument type will panic.
|
||||
func (buf *LineBuffer) Write(args ...interface{}) {
|
||||
for i, arg := range args {
|
||||
if i > 0 {
|
||||
buf.b.WriteByte(' ')
|
||||
}
|
||||
switch x := arg.(type) {
|
||||
case string:
|
||||
buf.b.WriteString(x)
|
||||
case []string:
|
||||
for j, s := range x {
|
||||
if j > 0 {
|
||||
buf.b.WriteByte(' ')
|
||||
}
|
||||
buf.b.WriteString(s)
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown argument type: %T", x))
|
||||
}
|
||||
}
|
||||
buf.b.WriteByte('\n')
|
||||
}
|
||||
|
||||
// WriteRuleLine prepends the strings "-A" and chainName to the buffer and calls
|
||||
// WriteLine to join all the words into the buffer and terminate with newline.
|
||||
func WriteRuleLine(buf *bytes.Buffer, chainName string, words ...string) {
|
||||
if len(words) == 0 {
|
||||
return
|
||||
}
|
||||
buf.WriteString("-A ")
|
||||
buf.WriteString(chainName)
|
||||
buf.WriteByte(' ')
|
||||
WriteLine(buf, words...)
|
||||
// WriteBytes writes bytes to buffer, and terminates with newline.
|
||||
func (buf *LineBuffer) WriteBytes(bytes []byte) {
|
||||
buf.b.Write(bytes)
|
||||
buf.b.WriteByte('\n')
|
||||
}
|
||||
|
||||
// WriteBytesLine write bytes to buffer, terminate with newline
|
||||
func WriteBytesLine(buf *bytes.Buffer, bytes []byte) {
|
||||
buf.Write(bytes)
|
||||
buf.WriteByte('\n')
|
||||
func (buf *LineBuffer) Reset() {
|
||||
buf.b.Reset()
|
||||
}
|
||||
|
||||
func (buf *LineBuffer) Bytes() []byte {
|
||||
return buf.b.Bytes()
|
||||
}
|
||||
|
||||
// RevertPorts is closing ports in replacementPortsMap but not in originalPortsMap. In other words, it only
|
||||
// closes the ports opened in this sync.
|
||||
func RevertPorts(replacementPortsMap, originalPortsMap map[utilnet.LocalPort]utilnet.Closeable) {
|
||||
func RevertPorts(replacementPortsMap, originalPortsMap map[netutils.LocalPort]netutils.Closeable) {
|
||||
for k, v := range replacementPortsMap {
|
||||
// Only close newly opened local ports - leave ones that were open before this update
|
||||
if originalPortsMap[k] == nil {
|
||||
klog.V(2).Infof("Closing local port %s", k.String())
|
||||
klog.V(2).InfoS("Closing local port", "port", k.String())
|
||||
v.Close()
|
||||
}
|
||||
}
|
||||
|
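Note: the WriteLine/WriteRuleLine/WriteBytesLine helpers above give way to the LineBuffer type. A self-contained usage sketch of the new API, not part of the commit; the chain name and rule below are invented for illustration:

package main

import (
	"bytes"
	"fmt"
)

// Minimal copy of the LineBuffer shown in the diff above, for illustration.
type LineBuffer struct{ b bytes.Buffer }

// Write joins string and []string arguments with spaces and appends a newline.
func (buf *LineBuffer) Write(args ...interface{}) {
	for i, arg := range args {
		if i > 0 {
			buf.b.WriteByte(' ')
		}
		switch x := arg.(type) {
		case string:
			buf.b.WriteString(x)
		case []string:
			for j, s := range x {
				if j > 0 {
					buf.b.WriteByte(' ')
				}
				buf.b.WriteString(s)
			}
		default:
			panic(fmt.Sprintf("unknown argument type: %T", x))
		}
	}
	buf.b.WriteByte('\n')
}

func (buf *LineBuffer) Bytes() []byte { return buf.b.Bytes() }

func main() {
	var buf LineBuffer
	// One call per output line; hypothetical iptables chain and rule.
	buf.Write("-A", "KUBE-SERVICES", []string{"-j", "ACCEPT"})
	fmt.Print(string(buf.Bytes()))
}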
1
vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux

 /*

1
vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_linux.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*

1
vendor/k8s.io/kubernetes/pkg/util/selinux/selinux_unsupported.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux

 /*
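These one-line changes, repeated across the vendored tree below, add the Go 1.17 `//go:build` form of each build constraint above the legacy `// +build` comment; gofmt keeps the two lines in sync. A brief illustration with an arbitrary constraint (`&&`/`||` in the new form map to `,`/space in the old):

//go:build linux && amd64
// +build linux,amd64

package example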
100
vendor/k8s.io/kubernetes/pkg/util/sysctl/sysctl.go
generated
vendored
@@ -1,100 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package sysctl
-
-import (
-	"io/ioutil"
-	"path"
-	"strconv"
-	"strings"
-)
-
-const (
-	sysctlBase = "/proc/sys"
-	// VMOvercommitMemory refers to the sysctl variable responsible for defining
-	// the memory over-commit policy used by kernel.
-	VMOvercommitMemory = "vm/overcommit_memory"
-	// VMPanicOnOOM refers to the sysctl variable responsible for defining
-	// the OOM behavior used by kernel.
-	VMPanicOnOOM = "vm/panic_on_oom"
-	// KernelPanic refers to the sysctl variable responsible for defining
-	// the timeout after a panic for the kernel to reboot.
-	KernelPanic = "kernel/panic"
-	// KernelPanicOnOops refers to the sysctl variable responsible for defining
-	// the kernel behavior when an oops or BUG is encountered.
-	KernelPanicOnOops = "kernel/panic_on_oops"
-	// RootMaxKeys refers to the sysctl variable responsible for defining
-	// the maximum number of keys that the root user (UID 0 in the root user namespace) may own.
-	RootMaxKeys = "kernel/keys/root_maxkeys"
-	// RootMaxBytes refers to the sysctl variable responsible for defining
-	// the maximum number of bytes of data that the root user (UID 0 in the root user namespace)
-	// can hold in the payloads of the keys owned by root.
-	RootMaxBytes = "kernel/keys/root_maxbytes"
-
-	// VMOvercommitMemoryAlways represents that kernel performs no memory over-commit handling.
-	VMOvercommitMemoryAlways = 1
-	// VMPanicOnOOMInvokeOOMKiller represents that kernel calls the oom_killer function when OOM occurs.
-	VMPanicOnOOMInvokeOOMKiller = 0
-
-	// KernelPanicOnOopsAlways represents that kernel panics on kernel oops.
-	KernelPanicOnOopsAlways = 1
-	// KernelPanicRebootTimeout is the timeout seconds after a panic for the kernel to reboot.
-	KernelPanicRebootTimeout = 10
-
-	// RootMaxKeysSetting is the maximum number of keys that the root user (UID 0 in the root user namespace) may own.
-	// Needed since docker creates a new key per container.
-	RootMaxKeysSetting = 1000000
-	// RootMaxBytesSetting is the maximum number of bytes of data that the root user (UID 0 in the root user namespace)
-	// can hold in the payloads of the keys owned by root.
-	// Allocate 25 bytes per key * number of MaxKeys.
-	RootMaxBytesSetting = RootMaxKeysSetting * 25
-)
-
-// Interface is an injectable interface for running sysctl commands.
-type Interface interface {
-	// GetSysctl returns the value for the specified sysctl setting
-	GetSysctl(sysctl string) (int, error)
-	// SetSysctl modifies the specified sysctl flag to the new value
-	SetSysctl(sysctl string, newVal int) error
-}
-
-// New returns a new Interface for accessing sysctl
-func New() Interface {
-	return &procSysctl{}
-}
-
-// procSysctl implements Interface by reading and writing files under /proc/sys
-type procSysctl struct {
-}
-
-// GetSysctl returns the value for the specified sysctl setting
-func (*procSysctl) GetSysctl(sysctl string) (int, error) {
-	data, err := ioutil.ReadFile(path.Join(sysctlBase, sysctl))
-	if err != nil {
-		return -1, err
-	}
-	val, err := strconv.Atoi(strings.Trim(string(data), " \n"))
-	if err != nil {
-		return -1, err
-	}
-	return val, nil
-}
-
-// SetSysctl modifies the specified sysctl flag to the new value
-func (*procSysctl) SetSysctl(sysctl string, newVal int) error {
-	return ioutil.WriteFile(path.Join(sysctlBase, sysctl), []byte(strconv.Itoa(newVal)), 0640)
-}
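For context, the deleted helper simply read and wrote integer values as files under /proc/sys. A standalone sketch of the same technique, using os in place of the deprecated io/ioutil; the sysctl name is chosen for illustration:

package main

import (
	"fmt"
	"os"
	"path"
	"strconv"
	"strings"
)

const sysctlBase = "/proc/sys"

// getSysctl reads an integer sysctl the same way the removed helper did;
// the setting name uses '/' separators, e.g. "vm/overcommit_memory".
func getSysctl(name string) (int, error) {
	data, err := os.ReadFile(path.Join(sysctlBase, name))
	if err != nil {
		return -1, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	val, err := getSysctl("vm/overcommit_memory")
	fmt.Println(val, err)
}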
68
vendor/k8s.io/kubernetes/pkg/util/taints/taints.go
generated
vendored
@@ -21,11 +21,10 @@ import (
 	"fmt"
 	"strings"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation"
-	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/core/helper"
 )

@@ -88,51 +87,6 @@ func validateTaintEffect(effect v1.TaintEffect) error {
 	return nil
 }

-// NewTaintsVar wraps []api.Taint in a struct that implements flag.Value to allow taints to be
-// bound to command line flags.
-func NewTaintsVar(ptr *[]api.Taint) taintsVar {
-	return taintsVar{
-		ptr: ptr,
-	}
-}
-
-type taintsVar struct {
-	ptr *[]api.Taint
-}
-
-func (t taintsVar) Set(s string) error {
-	if len(s) == 0 {
-		*t.ptr = nil
-		return nil
-	}
-	sts := strings.Split(s, ",")
-	var taints []api.Taint
-	for _, st := range sts {
-		taint, err := parseTaint(st)
-		if err != nil {
-			return err
-		}
-		taints = append(taints, api.Taint{Key: taint.Key, Value: taint.Value, Effect: api.TaintEffect(taint.Effect)})
-	}
-	*t.ptr = taints
-	return nil
-}
-
-func (t taintsVar) String() string {
-	if len(*t.ptr) == 0 {
-		return ""
-	}
-	var taints []string
-	for _, taint := range *t.ptr {
-		taints = append(taints, fmt.Sprintf("%s=%s:%s", taint.Key, taint.Value, taint.Effect))
-	}
-	return strings.Join(taints, ",")
-}
-
-func (t taintsVar) Type() string {
-	return "[]api.Taint"
-}
-
 // ParseTaints takes a spec which is an array and creates slices for new taints to be added, taints to be deleted.
 // It also validates the spec. For example, the form `<key>` may be used to remove a taint, but not to add one.
 func ParseTaints(spec []string) ([]v1.Taint, []v1.Taint, error) {
@@ -350,3 +304,23 @@ func TaintSetFilter(taints []v1.Taint, fn func(*v1.Taint) bool) []v1.Taint {

 	return res
 }
+
+// CheckTaintValidation checks if the given taint is valid.
+// Returns error if the given taint is invalid.
+func CheckTaintValidation(taint v1.Taint) error {
+	if errs := validation.IsQualifiedName(taint.Key); len(errs) > 0 {
+		return fmt.Errorf("invalid taint key: %s", strings.Join(errs, "; "))
+	}
+	if taint.Value != "" {
+		if errs := validation.IsValidLabelValue(taint.Value); len(errs) > 0 {
+			return fmt.Errorf("invalid taint value: %s", strings.Join(errs, "; "))
+		}
+	}
+	if taint.Effect != "" {
+		if err := validateTaintEffect(taint.Effect); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
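A small sketch of calling the newly added CheckTaintValidation helper; the taint key, value, and effect here are invented for illustration:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/util/taints"
)

func main() {
	t := v1.Taint{Key: "example.com/dedicated", Value: "ceph", Effect: v1.TaintEffectNoSchedule}
	if err := taints.CheckTaintValidation(t); err != nil {
		fmt.Println("invalid taint:", err)
		return
	}
	fmt.Println("taint is valid")
}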
1
vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux || darwin
 // +build linux darwin

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs_unsupported.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux && !darwin && !windows
 // +build !linux,!darwin,!windows

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs_windows.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/quota_linux_common.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/common/quota_linux_common_impl.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/project.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*
6
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_linux.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*
@@ -303,8 +304,7 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
 // AssignQuota chooses the quota ID based on the pod UID and path.
 // If the pod UID is identical to another one known, it may (but presently
 // doesn't) choose the same quota ID as other volumes in the pod.
-//lint:ignore SA4009 poduid is overwritten by design, see comment below
-func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error {
+func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck // SA4009 poduid is overwritten by design, see comment below
 	if bytes == nil {
 		return fmt.Errorf("attempting to assign null quota to %s", path)
 	}
@@ -319,7 +319,7 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
 	// volumes in a pod, we can simply remove this line of code.
 	// If and when we decide permanently that we're going to adopt
 	// one quota per volume, we can rip all of the pod code out.
-	poduid = types.UID(uuid.NewUUID())
+	poduid = types.UID(uuid.NewUUID()) //nolint:staticcheck // SA4009 poduid is overwritten by design, see comment above
 	if pod, ok := dirPodMap[path]; ok && pod != poduid {
 		return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
 	}

1
vendor/k8s.io/kubernetes/pkg/volume/util/fsquota/quota_unsupported.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux

 /*
1
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux && !windows
 // +build !linux,!windows

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_unsupported.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux && !windows
 // +build !linux,!windows

 /*

1
vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 /*
26
vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux

 /*
@@ -26,9 +27,7 @@ import (
 	"time"

 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume/util/types"
 )

@@ -46,25 +45,11 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1
 		return nil
 	}

-	fsGroupPolicyEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConfigurableFSGroupPolicy)
-
 	timer := time.AfterFunc(30*time.Second, func() {
 		klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath())
 	})
 	defer timer.Stop()

-	// This code exists for legacy purposes, so as old behaviour is entirely preserved when feature gate is disabled
-	// TODO: remove this when ConfigurableFSGroupPolicy turns GA.
-	if !fsGroupPolicyEnabled {
-		err := legacyOwnershipChange(mounter, fsGroup)
-		if completeFunc != nil {
-			completeFunc(types.CompleteFuncParam{
-				Err: &err,
-			})
-		}
-		return err
-	}
-
 	if skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) {
 		klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", mounter.GetPath())
 		return nil
@@ -84,15 +69,6 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1
 	return err
 }

-func legacyOwnershipChange(mounter Mounter, fsGroup *int64) error {
-	return filepath.Walk(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		return changeFilePermission(path, fsGroup, mounter.GetAttributes().ReadOnly, info)
-	})
-}
-
 func changeFilePermission(filename string, fsGroup *int64, readonly bool, info os.FileInfo) error {
 	err := os.Lchown(filename, -1, int(*fsGroup))
 	if err != nil {

1
vendor/k8s.io/kubernetes/pkg/volume/volume_unsupported.go
generated
vendored
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux

 /*
2
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions
generated
vendored
@@ -37,7 +37,6 @@ rules:
   - k8s.io/kubernetes/pkg/controller/service
   - k8s.io/kubernetes/pkg/controller/util/node
   - k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
-  - k8s.io/kubernetes/pkg/controller/volume/scheduling
   - k8s.io/kubernetes/pkg/credentialprovider
   - k8s.io/kubernetes/pkg/credentialprovider/aws
   - k8s.io/kubernetes/pkg/credentialprovider/azure
@@ -193,6 +192,7 @@ rules:
   - k8s.io/kubernetes/pkg/scheduler/internal/parallelize
   - k8s.io/kubernetes/pkg/scheduler/internal/queue
   - k8s.io/kubernetes/pkg/scheduler/listers
+  - k8s.io/kubernetes/pkg/scheduler/testing
   - k8s.io/kubernetes/pkg/scheduler/metrics
   - k8s.io/kubernetes/pkg/scheduler/nodeinfo
   - k8s.io/kubernetes/pkg/scheduler/util
3
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go
generated
vendored
@@ -17,7 +17,6 @@ limitations under the License.
 package framework

 import (
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"reflect"
 	"runtime"
 	"sync"
@@ -73,7 +72,7 @@ func RunCleanupActions() {
 	}()
 	// Run unlocked.
 	for _, fn := range list {
-		e2elog.Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
+		Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
 		fn()
 	}
 }
3
vendor/k8s.io/kubernetes/test/e2e/framework/exec_util.go
generated
vendored
@@ -58,6 +58,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)

 	const tty = false

+	Logf("ExecWithOptions: Clientset creation")
 	req := f.ClientSet.CoreV1().RESTClient().Post().
 		Resource("pods").
 		Name(options.PodName).
@@ -74,8 +75,8 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
 	}, scheme.ParameterCodec)

 	var stdout, stderr bytes.Buffer
+	Logf("ExecWithOptions: execute(POST %s %s)", req.URL())
 	err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)

 	if options.PreserveWhitespace {
 		return stdout.String(), stderr.String(), err
 	}
vendor/k8s.io/kubernetes/test/e2e/framework/log.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/e2e/framework/log.go
generated
vendored
@ -49,6 +49,7 @@ func Failf(format string, args ...interface{}) {
|
||||
skip := 2
|
||||
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
|
||||
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go
generated
vendored
@@ -318,7 +318,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
 func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
 	output, err := g.getMetricsFromAPIServer()
 	if err != nil {
-		return APIServerMetrics{}, nil
+		return APIServerMetrics{}, err
 	}
 	return parseAPIServerMetrics(output)
 }
9
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go
generated
vendored
@@ -563,18 +563,15 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {

 // GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
 func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
-	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
+	nodes, err := GetReadySchedulableNodes(c)
 	if err != nil {
-		return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
+		return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
 	}

 	// collect values of zone label from all nodes
 	zones := sets.NewString()
 	for _, node := range nodes.Items {
-		// We should have at least 1 node in the zone which is schedulable.
-		if !IsNodeSchedulable(&node) {
-			continue
-		}
 		if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
 			zones.Insert(zone)
 		}
2
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@@ -34,7 +34,7 @@ import (
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

-const etcdImage = "3.5.0-0"
+const etcdImage = "3.5.1-0"

 // EtcdUpgrade upgrades etcd on GCE.
 func EtcdUpgrade(targetStorage, targetVersion string) error {
2
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go
generated
vendored
@@ -166,7 +166,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 	if podConfig.NS == "" {
 		return nil, fmt.Errorf("Cannot create pod with empty namespace")
 	}
-	if len(podConfig.Command) == 0 && !NodeOSDistroIs("windows") {
+	if len(podConfig.Command) == 0 {
 		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
 	}
91
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
@@ -19,6 +19,8 @@ package pod
 import (
 	"context"
 	"fmt"
+	"os"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -43,6 +45,11 @@ import (
 // the pod has already reached completed state.
 var errPodCompleted = fmt.Errorf("pod ran to completion")

+// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
+// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
+// have their logs fetched.
+const LabelLogOnPodFailure = "log-on-pod-failure"
+
 // TODO: Move to its own subpkg.
 // expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
 func expectNoError(err error, explain ...interface{}) {
@@ -335,6 +342,23 @@ func podContainerStarted(c clientset.Interface, namespace, podName string, conta
 	}
 }

+func isContainerRunning(c clientset.Interface, namespace, podName, containerName string) wait.ConditionFunc {
+	return func() (bool, error) {
+		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		for _, statuses := range [][]v1.ContainerStatus{pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses, pod.Status.EphemeralContainerStatuses} {
+			for _, cs := range statuses {
+				if cs.Name == containerName {
+					return cs.State.Running != nil, nil
+				}
+			}
+		}
+		return false, nil
+	}
+}
+
 // LogPodStates logs basic info of provided pods for debugging.
 func LogPodStates(pods []v1.Pod) {
 	// Find maximum widths for pod, node, and phase strings for column printing.
@@ -388,14 +412,68 @@ func logPodTerminationMessages(pods []v1.Pod) {
 	}
 }

+// logPodLogs logs the container logs from pods in the given namespace. This can be helpful for debugging
+// issues that do not cause the container to fail (e.g.: network connectivity issues)
+// We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to
+// logging only the first 5 Pods. This requires the reportDir to be set, and the pods are logged into:
+// {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt
+func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
+	if reportDir == "" {
+		return
+	}
+
+	var logPods []v1.Pod
+	for _, pod := range pods {
+		if _, ok := pod.Labels[LabelLogOnPodFailure]; ok {
+			logPods = append(logPods, pod)
+		}
+	}
+	maxPods := len(logPods)
+
+	// There are no pods with the LabelLogOnPodFailure label, we default to the first 5 Pods.
+	if maxPods == 0 {
+		logPods = pods
+		maxPods = len(pods)
+		if maxPods > 5 {
+			maxPods = 5
+		}
+	}
+
+	tailLen := 42
+	for i := 0; i < maxPods; i++ {
+		pod := logPods[i]
+		for _, container := range pod.Spec.Containers {
+			logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
+			if err != nil {
+				e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
+				continue
+			}
+
+			logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
+			err = os.MkdirAll(logDir, 0755)
+			if err != nil {
+				e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
+				continue
+			}
+
+			logPath := filepath.Join(logDir, "logs.txt")
+			err = os.WriteFile(logPath, []byte(logs), 0644)
+			if err != nil {
+				e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
+			}
+		}
+	}
+}
+
 // DumpAllPodInfoForNamespace logs all pod information for a given namespace.
-func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
+func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
 	pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		e2elog.Logf("unable to fetch pod debug info: %v", err)
 	}
 	LogPodStates(pods.Items)
 	logPodTerminationMessages(pods.Items)
+	logPodLogs(c, namespace, pods.Items, reportDir)
 }

 // FilterNonRestartablePods filters out pods that will never get recreated if
@@ -544,23 +622,23 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim

 // GetPodLogs returns the logs of the specified container (namespace/pod/container).
 func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
-	return getPodLogsInternal(c, namespace, podName, containerName, false, nil)
+	return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil)
 }

 // GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
 func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
 	sinceTime := metav1.NewTime(since)
-	return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime)
+	return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime, nil)
 }

 // GetPreviousPodLogs returns the logs of the previous instance of the
 // specified container (namespace/pod/container).
 func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
-	return getPodLogsInternal(c, namespace, podName, containerName, true, nil)
+	return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil)
 }

 // utility function for gomega Eventually
-func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time) (string, error) {
+func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
 	request := c.CoreV1().RESTClient().Get().
 		Resource("pods").
 		Namespace(namespace).
@@ -570,6 +648,9 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
 	if sinceTime != nil {
 		request.Param("sinceTime", sinceTime.Format(time.RFC3339))
 	}
+	if tailLines != nil {
+		request.Param("tailLines", strconv.Itoa(*tailLines))
+	}
 	logs, err := request.Do(context.TODO()).Raw()
 	if err != nil {
 		return "", err
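The new tailLines plumbing corresponds to the pod log API's TailLines option. A sketch of the equivalent call with plain client-go; the kubeconfig path, namespace, pod, and container names are placeholders:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	tail := int64(42) // same default tail length logPodLogs uses above
	req := client.CoreV1().Pods("default").GetLogs("example-pod", &corev1.PodLogOptions{
		Container: "example-container",
		TailLines: &tail,
	})
	logs, err := req.Do(context.TODO()).Raw()
	fmt.Println(string(logs), err)
}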
26
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
@@ -21,7 +21,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"sync"
 	"text/tabwriter"
 	"time"

@@ -114,8 +113,6 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 	start := time.Now()
 	e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
 		timeout, minPods, ns)
-	wg := sync.WaitGroup{}
-	wg.Add(1)
 	var ignoreNotReady bool
 	badPods := []v1.Pod{}
 	desiredPods := 0
@@ -550,3 +547,26 @@ func WaitForPodContainerToFail(c clientset.Interface, namespace, podName string,
 func WaitForPodContainerStarted(c clientset.Interface, namespace, podName string, containerIndex int, timeout time.Duration) error {
 	return wait.PollImmediate(poll, timeout, podContainerStarted(c, namespace, podName, containerIndex))
 }
+
+// WaitForPodFailedReason wait for pod failed reason in status, for example "SysctlForbidden".
+func WaitForPodFailedReason(c clientset.Interface, pod *v1.Pod, reason string, timeout time.Duration) error {
+	waitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {
+		pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		if pod.Status.Reason == reason {
+			return true, nil
+		}
+		return false, nil
+	})
+	if waitErr != nil {
+		return fmt.Errorf("error waiting for pod SysctlForbidden status: %v", waitErr)
+	}
+	return nil
+}
+
+// WaitForContainerRunning waits for the given Pod container to have a state of running
+func WaitForContainerRunning(c clientset.Interface, namespace, podName, containerName string, timeout time.Duration) error {
+	return wait.PollImmediate(poll, timeout, isContainerRunning(c, namespace, podName, containerName))
+}
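A sketch of how a caller might use the new WaitForPodFailedReason helper; the reason string and 5-minute timeout are illustrative:

package sketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectSysctlForbidden waits for a pod that is expected to be rejected by
// the kubelet (for example, one requesting a forbidden sysctl) to report the
// "SysctlForbidden" failure reason in its status.
func expectSysctlForbidden(c clientset.Interface, pod *v1.Pod) error {
	return e2epod.WaitForPodFailedReason(c, pod, "SysctlForbidden", 5*time.Minute)
}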
28
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored
@@ -18,6 +18,7 @@ package framework

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"regexp"
 	"sync"
@@ -27,7 +28,9 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -37,6 +40,7 @@ import (
 	"github.com/onsi/gomega"

 	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

@@ -146,6 +150,30 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
 	}))
 }

+// AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
+func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
+	namespace := c.f.Namespace.Name
+
+	podJS, err := json.Marshal(pod)
+	ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
+
+	ecPod := pod.DeepCopy()
+	ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
+	ecJS, err := json.Marshal(ecPod)
+	ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
+
+	patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
+	ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
+
+	// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
+	if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
+		return err
+	}
+
+	ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
+	return nil
+}
+
 // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
 // disappear before the timeout, it will fail the test.
 func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
50
vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go
generated
vendored
@@ -19,8 +19,11 @@ package framework
 import (
 	"context"
 	"fmt"
+	"strings"
 	"time"

+	"k8s.io/apimachinery/pkg/util/wait"
+
 	"k8s.io/kubernetes/test/e2e/storage/utils"

 	"github.com/onsi/ginkgo"
@@ -295,17 +298,40 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.Time
 }

 // create the PV resource. Fails test on error.
-func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
-	pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
+func createPV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
+	var resultPV *v1.PersistentVolume
+	var lastCreateErr error
+	err := wait.PollImmediate(29*time.Second, timeouts.PVCreate, func() (done bool, err error) {
+		resultPV, lastCreateErr = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
+		if lastCreateErr != nil {
+			// If we hit a quota problem, we are not done and should retry again. This happens to be the quota failure string for GCP.
+			// If quota failure strings are found for other platforms, they can be added to improve reliability when running
+			// many parallel test jobs in a single cloud account. This corresponds to controller-like behavior and
+			// to what we would recommend for general clients.
+			if strings.Contains(lastCreateErr.Error(), `googleapi: Error 403: Quota exceeded for quota group`) {
+				return false, nil
+			}
+
+			// if it was not a quota failure, fail immediately
+			return false, lastCreateErr
+		}
+
+		return true, nil
+	})
+	// if we have an error from creating the PV, use that instead of a timeout error
+	if lastCreateErr != nil {
+		return nil, fmt.Errorf("PV Create API error: %v", err)
+	}
 	if err != nil {
 		return nil, fmt.Errorf("PV Create API error: %v", err)
 	}
-	return pv, nil
+
+	return resultPV, nil
 }

 // CreatePV creates the PV resource. Fails test on error.
-func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
-	return createPV(c, pv)
+func CreatePV(c clientset.Interface, timeouts *framework.TimeoutContext, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
+	return createPV(c, timeouts, pv)
 }

 // CreatePVC creates the PVC resource. Fails test on error.
@@ -323,7 +349,7 @@ func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim)
 // Note: in the pre-bind case the real PVC name, which is generated, is not
 // known until after the PVC is instantiated. This is why the pvc is created
 // before the pv.
-func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
+func CreatePVCPV(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
 	// make the pvc spec
 	pvc := MakePersistentVolumeClaim(pvcConfig, ns)
 	preBindMsg := ""
@@ -344,7 +370,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 	if preBind {
 		pv.Spec.ClaimRef.Name = pvc.Name
 	}
-	pv, err = createPV(c, pv)
+	pv, err = createPV(c, timeouts, pv)
 	if err != nil {
 		return nil, pvc, err
 	}
@@ -358,7 +384,7 @@ func CreatePVCPV(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 // Note: in the pre-bind case the real PV name, which is generated, is not
 // known until after the PV is instantiated. This is why the pv is created
 // before the pvc.
-func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
+func CreatePVPVC(c clientset.Interface, timeouts *framework.TimeoutContext, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim, error) {
 	preBindMsg := ""
 	if preBind {
 		preBindMsg = " pre-bound"
@@ -370,7 +396,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 	pvc := MakePersistentVolumeClaim(pvcConfig, ns)

 	// instantiate the pv
-	pv, err := createPV(c, pv)
+	pv, err := createPV(c, timeouts, pv)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -392,7 +418,7 @@ func CreatePVPVC(c clientset.Interface, pvConfig PersistentVolumeConfig, pvcConf
 // sees an error returned, it needs to decide what to do about entries in the maps.
 // Note: when the test suite deletes the namespace orphaned pvcs and pods are deleted. However,
 // orphaned pvs are not deleted and will remain after the suite completes.
-func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
+func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvConfig PersistentVolumeConfig, pvcConfig PersistentVolumeClaimConfig) (PVMap, PVCMap, error) {
 	pvMap := make(PVMap, numpvs)
 	pvcMap := make(PVCMap, numpvcs)
 	extraPVCs := 0
@@ -405,7 +431,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf

 	// create pvs and pvcs
 	for i := 0; i < pvsToCreate; i++ {
-		pv, pvc, err := CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
+		pv, pvc, err := CreatePVPVC(c, timeouts, pvConfig, pvcConfig, ns, false)
 		if err != nil {
 			return pvMap, pvcMap, err
 		}
@@ -416,7 +442,7 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
 	// create extra pvs or pvcs as needed
 	for i := 0; i < extraPVs; i++ {
 		pv := MakePersistentVolume(pvConfig)
-		pv, err := createPV(c, pv)
+		pv, err := createPV(c, timeouts, pv)
 		if err != nil {
 			return pvMap, pvcMap, err
 		}
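The retry loop in createPV is a general pattern: poll until the call succeeds, treat a recognizable quota error as retryable, and fail fast on anything else. A condensed sketch of the same controller-like behavior; the interval, timeout, and error substring are arbitrary:

package sketch

import (
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// retryOnQuota retries op until it succeeds or the timeout expires,
// retrying only when the error looks like a (provider-specific) quota failure.
func retryOnQuota(op func() error) error {
	var lastErr error
	err := wait.PollImmediate(29*time.Second, 3*time.Minute, func() (bool, error) {
		lastErr = op()
		if lastErr != nil {
			if strings.Contains(lastErr.Error(), "Quota exceeded") {
				return false, nil // retryable
			}
			return false, lastErr // fatal
		}
		return true, nil
	})
	if lastErr != nil {
		return lastErr // prefer the API error over a bare timeout
	}
	return err
}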
37
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go
generated
vendored
@@ -43,15 +43,6 @@ import (
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

-// New local storage types to support local storage capacity isolation
-var localStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
-
-var (
-	downwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
-	execProbeTimeout     featuregate.Feature = "ExecProbeTimeout"
-	csiMigration         featuregate.Feature = "CSIMigration"
-)
-
 func skipInternalf(caller int, format string, args ...interface{}) {
 	msg := fmt.Sprintf(format, args...)
 	framework.Logf(msg)
@@ -127,6 +118,7 @@ func pruneStack(skip int) string {
 // Skipf skips with information about why the test is being skipped.
 func Skipf(format string, args ...interface{}) {
 	skipInternalf(1, format, args...)
+	panic("unreachable")
 }

 // SkipUnlessAtLeast skips if the value is less than the minValue.
@@ -136,28 +128,17 @@ func SkipUnlessAtLeast(value int, minValue int, message string) {
 	}
 }

-// SkipUnlessLocalEphemeralStorageEnabled skips if the LocalStorageCapacityIsolation is not enabled.
-func SkipUnlessLocalEphemeralStorageEnabled() {
-	if !utilfeature.DefaultFeatureGate.Enabled(localStorageCapacityIsolation) {
-		skipInternalf(1, "Only supported when %v feature is enabled", localStorageCapacityIsolation)
+// SkipUnlessFeatureGateEnabled skips if the feature is disabled
+func SkipUnlessFeatureGateEnabled(gate featuregate.Feature) {
+	if !utilfeature.DefaultFeatureGate.Enabled(gate) {
+		skipInternalf(1, "Only supported when %v feature is enabled", gate)
 	}
 }

-func SkipUnlessDownwardAPIHugePagesEnabled() {
-	if !utilfeature.DefaultFeatureGate.Enabled(downwardAPIHugePages) {
-		skipInternalf(1, "Only supported when %v feature is enabled", downwardAPIHugePages)
-	}
-}
-
-func SkipUnlessExecProbeTimeoutEnabled() {
-	if !utilfeature.DefaultFeatureGate.Enabled(execProbeTimeout) {
-		skipInternalf(1, "Only supported when %v feature is enabled", execProbeTimeout)
-	}
-}
-
-func SkipIfCSIMigrationEnabled() {
-	if utilfeature.DefaultFeatureGate.Enabled(csiMigration) {
-		skipInternalf(1, "Only supported when %v feature is disabled", csiMigration)
+// SkipIfFeatureGateEnabled skips if the feature is enabled
+func SkipIfFeatureGateEnabled(gate featuregate.Feature) {
+	if utilfeature.DefaultFeatureGate.Enabled(gate) {
+		skipInternalf(1, "Only supported when %v feature is disabled", gate)
 	}
 }
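The dedicated per-gate skip helpers are gone; callers now name the gate themselves. For example, assuming one of the gates the old helpers hard-coded:

package sketch

import (
	"k8s.io/component-base/featuregate"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// skipUnlessExecProbeTimeout replaces the removed SkipUnlessExecProbeTimeoutEnabled.
func skipUnlessExecProbeTimeout() {
	e2eskipper.SkipUnlessFeatureGateEnabled(featuregate.Feature("ExecProbeTimeout"))
}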
9
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@@ -135,7 +135,6 @@ type TestContextType struct {
 	GatherMetricsAfterTest      string
 	GatherSuiteMetricsAfterTest bool
 	MaxNodesToGather            int
-	AllowGatheringProfiles      bool
 	// If set to 'true' framework will gather ClusterAutoscaler metrics when gathering them for other components.
 	IncludeClusterAutoscalerMetrics bool
 	// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
@@ -188,6 +187,10 @@ type TestContextType struct {

 	// SnapshotControllerHTTPPort the port used for communicating with the snapshot controller HTTP endpoint.
 	SnapshotControllerHTTPPort int
+
+	// RequireDevices makes mandatory on the environment on which tests are run 1+ devices exposed through device plugins.
+	// With this enabled The e2e tests requiring devices for their operation can assume that if devices aren't reported, the test can fail
+	RequireDevices bool
 }

 // NodeKillerConfig describes configuration of NodeKiller -- a utility to
@@ -292,7 +295,6 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
 	flags.StringVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", "false", "If set to 'true' framework will gather metrics from all components after each test. If set to 'master' only master component metrics would be gathered.")
 	flags.BoolVar(&TestContext.GatherSuiteMetricsAfterTest, "gather-suite-metrics-at-teardown", false, "If set to true framwork will gather metrics from all components after the whole test suite completes.")
-	flags.BoolVar(&TestContext.AllowGatheringProfiles, "allow-gathering-profiles", true, "If set to true framework will allow to gather CPU/memory allocation pprof profiles from the master.")
 	flags.BoolVar(&TestContext.IncludeClusterAutoscalerMetrics, "include-cluster-autoscaler", false, "If set to true, framework will include Cluster Autoscaler when gathering metrics.")
 	flags.StringVar(&TestContext.OutputPrintType, "output-print-type", "json", "Format in which summaries should be printed: 'hr' for human readable, 'json' for JSON ones.")
 	flags.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
@@ -338,6 +340,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
 	flags.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
 	flags.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
 	flags.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
+	// NOTE: Node E2E tests have this flag defined as well, but true by default.
+	// If this becomes true as well, they should be refactored into RegisterCommonFlags.
+	flags.BoolVar(&TestContext.PrepullImages, "prepull-images", false, "If true, prepull images so image pull failures do not cause test failures.")
 	flags.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, skeleton (the fallback if not set), etc.)")
 	flags.StringVar(&TestContext.Tooling, "tooling", "", "The tooling in use (kops, gke, etc.)")
 	flags.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
5
vendor/k8s.io/kubernetes/test/e2e/framework/timeouts.go
generated
vendored
@@ -29,6 +29,7 @@ const (
 	claimBoundTimeout     = 3 * time.Minute
 	pvReclaimTimeout      = 3 * time.Minute
 	pvBoundTimeout        = 3 * time.Minute
+	pvCreateTimeout       = 3 * time.Minute
 	pvDeleteTimeout       = 3 * time.Minute
 	pvDeleteSlowTimeout   = 20 * time.Minute
 	snapshotCreateTimeout = 5 * time.Minute
@@ -67,6 +68,9 @@ type TimeoutContext struct {
 	// PVBound is how long PVs have to become bound.
 	PVBound time.Duration

+	// PVCreate is how long PVs have to be created.
+	PVCreate time.Duration
+
 	// PVDelete is how long PVs have to become deleted.
 	PVDelete time.Duration

@@ -95,6 +99,7 @@ func NewTimeoutContextWithDefaults() *TimeoutContext {
 		ClaimBound:     claimBoundTimeout,
 		PVReclaim:      pvReclaimTimeout,
 		PVBound:        pvBoundTimeout,
+		PVCreate:       pvCreateTimeout,
 		PVDelete:       pvDeleteTimeout,
 		PVDeleteSlow:   pvDeleteSlowTimeout,
 		SnapshotCreate: snapshotCreateTimeout,
11
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@@ -63,6 +63,7 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	uexec "k8s.io/utils/exec"
+	netutils "k8s.io/utils/net"

 	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
@@ -898,7 +899,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
 		return c.CoreV1().Events(ns).List(context.TODO(), opts)
 	}, namespace)

-	e2epod.DumpAllPodInfoForNamespace(c, namespace)
+	e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)

 	// If cluster is large, then the following logs are basically useless, because:
 	// 1. it takes tens of minutes or hours to grab all of them
@@ -1265,7 +1266,7 @@ func getControlPlaneAddresses(c clientset.Interface) ([]string, []string, []stri
 	if err != nil {
 		Failf("Failed to parse hostname: %v", err)
 	}
-	if net.ParseIP(hostURL.Host) != nil {
+	if netutils.ParseIPSloppy(hostURL.Host) != nil {
 		externalIPs = append(externalIPs, hostURL.Host)
 	} else {
 		hostnames = append(hostnames, hostURL.Host)
@@ -1376,7 +1377,7 @@ retriesLoop:
 	// NOTE the test may need access to the events to see what's going on, such as a change in status
 	actualWatchEvents := scenario(resourceWatch)
 	errs := sets.NewString()
-	ExpectEqual(len(expectedWatchEvents) <= len(actualWatchEvents), true, "Error: actual watch events amount (%d) must be greater than or equal to expected watch events amount (%d)", len(actualWatchEvents), len(expectedWatchEvents))
+	gomega.Expect(len(expectedWatchEvents)).To(gomega.BeNumerically("<=", len(actualWatchEvents)), "Did not get enough watch events")

 	totalValidWatchEvents := 0
 	foundEventIndexes := map[int]*int{}
@@ -1405,7 +1406,9 @@ retriesLoop:
 		fmt.Println("invariants violated:\n", strings.Join(errs.List(), "\n - "))
 		continue retriesLoop
 	}
-	ExpectEqual(errs.Len() > 0, false, strings.Join(errs.List(), "\n - "))
+	if errs.Len() > 0 {
+		Failf("Unexpected error(s): %v", strings.Join(errs.List(), "\n - "))
+	}
 	ExpectEqual(totalValidWatchEvents, len(expectedWatchEvents), "Error: there must be an equal amount of total valid watch events (%d) and expected watch events (%d)", totalValidWatchEvents, len(expectedWatchEvents))
 	break retriesLoop
 }
34
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
34
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@ -458,14 +458,14 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
|
||||
return clientPod, nil
|
||||
}
|
||||
|
||||
func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
|
||||
func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
|
||||
ginkgo.By("Checking that text file contents are perfect.")
|
||||
for i, test := range tests {
|
||||
if test.Mode == v1.PersistentVolumeBlock {
|
||||
// Block: check content
|
||||
deviceName := fmt.Sprintf("/opt/%d", i)
|
||||
commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
|
||||
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
|
||||
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
|
||||
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
|
||||
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
|
||||
|
||||
// Check that it's a real block device
|
||||
@ -474,7 +474,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
-_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
+_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)

// Check that a directory has been mounted
@ -485,14 +485,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
-_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
+_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}

// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
-_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
+_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@ -531,7 +531,23 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()

-testVolumeContent(f, clientPod, fsGroup, fsType, tests)
+testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
+
+ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
+ec := &v1.EphemeralContainer{
+EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
+}
+ec.Name = "volume-ephemeral-container"
+err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
+// The API server will return NotFound for the subresource when the feature is disabled
+// BEGIN TODO: remove after EphemeralContainers feature gate is retired
+if apierrors.IsNotFound(err) {
+framework.Logf("Skipping ephemeral container re-test because feature is disabled (error: %q)", err)
+return
+}
+// END TODO: remove after EphemeralContainers feature gate is retired
+framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
+testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
}

// InjectContent inserts index.html with given content into given volume. It does so by
@ -572,7 +588,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs

// Check that the data have been really written in this pod.
// This tests non-persistent volume types
-testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
+testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
}

// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
@ -583,7 +599,7 @@ func generateWriteCmd(content, path string) []string {
}

// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
-func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
+func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
var commands []string
commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
return commands
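The re-test above clones the client pod's first container spec, so the ephemeral container mounts the same volumes and can verify the same content. For readers outside the e2e framework helpers, a minimal client-go sketch of the same technique follows; the function name and hard-coded container name are illustrative, not part of this commit:

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// addEphemeralCopy clones the pod's first container (image, command, and
// volume mounts) into an ephemeral container and submits it through the
// pods/ephemeralcontainers subresource.
func addEphemeralCopy(ctx context.Context, cs kubernetes.Interface, pod *v1.Pod) (*v1.Pod, error) {
	ec := v1.EphemeralContainer{
		// Container and EphemeralContainerCommon share identical fields,
		// so a direct type conversion works, as in the diff above.
		EphemeralContainerCommon: v1.EphemeralContainerCommon(pod.Spec.Containers[0]),
	}
	ec.Name = "volume-ephemeral-container"
	pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, ec)
	// While the EphemeralContainers feature gate is off, the API server
	// answers NotFound for this subresource, mirroring the skip above.
	return cs.CoreV1().Pods(pod.Namespace).UpdateEphemeralContainers(ctx, pod.Name, pod, metav1.UpdateOptions{})
}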
16 vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go generated vendored
@ -70,6 +70,7 @@ type LocalTestResource struct {
// LocalTestResourceManager represents interface to create/destroy local test resources on node
type LocalTestResourceManager interface {
Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource
+ExpandBlockDevice(ltr *LocalTestResource, mbToAdd int) error
Remove(ltr *LocalTestResource)
}

@ -289,6 +290,21 @@ func (l *ltrMgr) cleanupLocalVolumeGCELocalSSD(ltr *LocalTestResource) {
framework.ExpectNoError(err)
}

+func (l *ltrMgr) expandLocalVolumeBlockFS(ltr *LocalTestResource, mbToAdd int) error {
+ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file conv=notrunc oflag=append bs=1M count=%d", ltr.loopDir, mbToAdd)
+loopDev := l.findLoopDevice(ltr.loopDir, ltr.Node)
+losetupCmd := fmt.Sprintf("losetup -c %s", loopDev)
+return l.hostExec.IssueCommand(fmt.Sprintf("%s && %s", ddCmd, losetupCmd), ltr.Node)
+}
+
+func (l *ltrMgr) ExpandBlockDevice(ltr *LocalTestResource, mbtoAdd int) error {
+switch ltr.VolumeType {
+case LocalVolumeBlockFS:
+return l.expandLocalVolumeBlockFS(ltr, mbtoAdd)
+}
+return fmt.Errorf("Failed to expand local test resource, unsupported volume type: %s", ltr.VolumeType)
+}
+
func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters map[string]string) *LocalTestResource {
var ltr *LocalTestResource
switch volumeType {
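The new expand path relies on a property of loop devices: appending to the backing file grows it, and losetup -c (an alias for --set-capacity) tells the kernel to re-read the size without detaching the device. A standalone sketch of the same two steps, run directly with os/exec rather than through the hostExec abstraction (the command wiring here is an assumption, not the vendored code):

package main

import (
	"fmt"
	"os/exec"
)

// growLoopBackedVolume appends mbToAdd MiB of zeros to the loop device's
// backing file, then refreshes the live device's capacity in place.
func growLoopBackedVolume(backingFile, loopDev string, mbToAdd int) error {
	dd := exec.Command("dd", "if=/dev/zero", "of="+backingFile,
		"conv=notrunc", "oflag=append", "bs=1M", fmt.Sprintf("count=%d", mbToAdd))
	if out, err := dd.CombinedOutput(); err != nil {
		return fmt.Errorf("dd failed: %v: %s", err, out)
	}
	// losetup -c makes the kernel re-read the (now larger) backing file size.
	if out, err := exec.Command("losetup", "-c", loopDev).CombinedOutput(); err != nil {
		return fmt.Errorf("losetup -c failed: %v: %s", err, out)
	}
	return nil
}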
4 vendor/k8s.io/kubernetes/test/utils/admission_webhook.go generated vendored
@ -53,7 +53,7 @@ func NewAdmissionWebhookServer(handler http.Handler) (string, func(), error) {
// AdmissionWebhookHandler creates a HandlerFunc that decodes/encodes AdmissionReview and performs
// given admit function
func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview) error) http.HandlerFunc {
-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
@ -82,7 +82,7 @@ func AdmissionWebhookHandler(t *testing.T, admit func(*v1beta1.AdmissionReview)
if err := json.NewEncoder(w).Encode(review); err != nil {
t.Errorf("Marshal of response failed with error: %v", err)
}
-})
+}
}

// LocalhostCert was generated from crypto/tls/generate_cert.go with the following command:
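This change compiles because a function literal with the signature func(http.ResponseWriter, *http.Request) is directly assignable to the named type http.HandlerFunc; the explicit conversion that used to wrap the literal was redundant. A compilable illustration (the helper name is ours, not the vendored code's):

package main

import (
	"fmt"
	"net/http"
)

// newHandler returns a bare function literal; Go accepts it as an
// http.HandlerFunc because the underlying function types are identical.
func newHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}
}

func main() {
	http.Handle("/", newHandler())
}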
42 vendor/k8s.io/kubernetes/test/utils/deployment.go generated vendored
@ -117,7 +117,7 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.D

func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment, logf LogfFn) (string, error) {
var reason string
-oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
+oldRSs, allOldRSs, newRS, err := GetAllReplicaSets(deployment, c)
if err != nil {
return "", err
}
@ -152,6 +152,40 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment
return "", nil
}

+// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
+// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
+// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
+func GetAllReplicaSets(deployment *apps.Deployment, c clientset.Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
+rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
+if err != nil {
+return nil, nil, nil, err
+}
+oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(deployment, rsList)
+newRS := deploymentutil.FindNewReplicaSet(deployment, rsList)
+return oldRSes, allOldRSes, newRS, nil
+}
+
+// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
+// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
+func GetOldReplicaSets(deployment *apps.Deployment, c clientset.Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
+rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
+if err != nil {
+return nil, nil, err
+}
+oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(deployment, rsList)
+return oldRSes, allOldRSes, nil
+}
+
+// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
+// Returns nil if the new replica set doesn't exist yet.
+func GetNewReplicaSet(deployment *apps.Deployment, c clientset.Interface) (*apps.ReplicaSet, error) {
+rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
+if err != nil {
+return nil, err
+}
+return deploymentutil.FindNewReplicaSet(deployment, rsList), nil
+}
+
// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
@ -180,7 +214,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
-newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+newRS, err = GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
@ -223,7 +257,7 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
}

// Check revision of the new replica set of this deployment
-newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+newRS, err := GetNewReplicaSet(deployment, c)
if err != nil {
return fmt.Errorf("unable to get new replicaset of deployment %s during revision check: %v", deploymentName, err)
}
@ -344,7 +378,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason, latest deployment conditions: %+v", deployment.Name, deployment.Status.Conditions)
-_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
+_, allOldRSs, newRS, err := GetAllReplicaSets(deployment, c)
if err == nil {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, append(allOldRSs, newRS), logf)
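With these helpers the test utils take the whole clientset and resolve old and new replica sets themselves, instead of threading a deploymentutil AppsV1 shim through every call site. A hedged usage sketch of the new functions (the function and import aliases here are ours):

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	"k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/utils"
)

// logReplicaSets prints how many old replica sets still have pods, how many
// exist in total, and whether the new replica set has been created yet.
func logReplicaSets(c kubernetes.Interface, d *apps.Deployment) error {
	oldRSs, allOldRSs, newRS, err := testutils.GetAllReplicaSets(d, c)
	if err != nil {
		return err
	}
	fmt.Printf("old with pods: %d, old total: %d, new exists: %t\n",
		len(oldRSs), len(allOldRSs), newRS != nil)
	return nil
}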
64 vendor/k8s.io/kubernetes/test/utils/image/manifest.go generated vendored
@ -31,16 +31,13 @@ import (
// RegistryList holds public and private image registries
type RegistryList struct {
GcAuthenticatedRegistry string `yaml:"gcAuthenticatedRegistry"`
-E2eRegistry string `yaml:"e2eRegistry"`
PromoterE2eRegistry string `yaml:"promoterE2eRegistry"`
BuildImageRegistry string `yaml:"buildImageRegistry"`
InvalidRegistry string `yaml:"invalidRegistry"`
GcEtcdRegistry string `yaml:"gcEtcdRegistry"`
GcRegistry string `yaml:"gcRegistry"`
SigStorageRegistry string `yaml:"sigStorageRegistry"`
-GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"`
PrivateRegistry string `yaml:"privateRegistry"`
-SampleRegistry string `yaml:"sampleRegistry"`
MicrosoftRegistry string `yaml:"microsoftRegistry"`
DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"`
CloudProviderGcpRegistry string `yaml:"cloudProviderGcpRegistry"`
@ -91,7 +88,6 @@ func initReg() RegistryList {
var (
initRegistry = RegistryList{
GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
-E2eRegistry: "gcr.io/kubernetes-e2e-test-images",
PromoterE2eRegistry: "k8s.gcr.io/e2e-test-images",
BuildImageRegistry: "k8s.gcr.io/build-image",
InvalidRegistry: "invalid.com/invalid",
@ -99,8 +95,6 @@ var (
GcRegistry: "k8s.gcr.io",
SigStorageRegistry: "k8s.gcr.io/sig-storage",
PrivateRegistry: "gcr.io/k8s-authenticated-test",
-SampleRegistry: "gcr.io/google-samples",
-GcrReleaseRegistry: "gcr.io/gke-release",
MicrosoftRegistry: "mcr.microsoft.com",
DockerLibraryRegistry: "docker.io/library",
CloudProviderGcpRegistry: "k8s.gcr.io/cloud-provider-gcp",
@ -204,48 +198,48 @@ const (

func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
configs := map[int]Config{}
-configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.32"}
+configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.33"}
configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
-configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.4"}
-configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.3"}
-configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-1"}
+configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.5"}
+configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
+configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-2"}
configs[CheckMetadataConcealment] = Config{list.PromoterE2eRegistry, "metadata-concealment", "1.6"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
-configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "buster-v1.6.7"}
-configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.3"}
-configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.4.13-0"}
-configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"}
-configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-1"}
-configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-1"}
+configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "bullseye-v1.1.0"}
+configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.4"}
+configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.1-0"}
+configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
+configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-2"}
+configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-2"}
configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
-configs[IpcUtils] = Config{list.PromoterE2eRegistry, "ipc-utils", "1.2"}
-configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.4"}
-configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.4"}
-configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.4"}
+configs[IpcUtils] = Config{list.PromoterE2eRegistry, "ipc-utils", "1.3"}
+configs[JessieDnsutils] = Config{list.PromoterE2eRegistry, "jessie-dnsutils", "1.5"}
+configs[Kitten] = Config{list.PromoterE2eRegistry, "kitten", "1.5"}
+configs[Nautilus] = Config{list.PromoterE2eRegistry, "nautilus", "1.5"}
configs[NFSProvisioner] = Config{list.SigStorageRegistry, "nfs-provisioner", "v2.2.2"}
-configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-1"}
-configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-1"}
-configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.1"}
-configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.1"}
+configs[Nginx] = Config{list.PromoterE2eRegistry, "nginx", "1.14-2"}
+configs[NginxNew] = Config{list.PromoterE2eRegistry, "nginx", "1.15-2"}
+configs[NodePerfNpbEp] = Config{list.PromoterE2eRegistry, "node-perf/npb-ep", "1.2"}
+configs[NodePerfNpbIs] = Config{list.PromoterE2eRegistry, "node-perf/npb-is", "1.2"}
configs[NodePerfTfWideDeep] = Config{list.PromoterE2eRegistry, "node-perf/tf-wide-deep", "1.1"}
configs[Nonewprivs] = Config{list.PromoterE2eRegistry, "nonewprivs", "1.3"}
-configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.1"}
+configs[NonRoot] = Config{list.PromoterE2eRegistry, "nonroot", "1.2"}
// Pause - when these values are updated, also update cmd/kubelet/app/options/container_runtime.go
-configs[Pause] = Config{list.GcRegistry, "pause", "3.5"}
+configs[Pause] = Config{list.GcRegistry, "pause", "3.6"}
configs[Perl] = Config{list.PromoterE2eRegistry, "perl", "5.26"}
configs[PrometheusDummyExporter] = Config{list.GcRegistry, "prometheus-dummy-exporter", "v0.1.0"}
configs[PrometheusToSd] = Config{list.GcRegistry, "prometheus-to-sd", "v0.5.0"}
-configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-alpine"}
+configs[Redis] = Config{list.PromoterE2eRegistry, "redis", "5.0.5-1"}
configs[RegressionIssue74839] = Config{list.PromoterE2eRegistry, "regression-issue-74839", "1.2"}
-configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.9"}
+configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.10"}
configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
-configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.2"}
-configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.2"}
-configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.2"}
-configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.3"}
+configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.3"}
+configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.3"}
+configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.3"}
+configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.4"}
configs[WindowsServer] = Config{list.MicrosoftRegistry, "windows", "1809"}

// if requested, map all the SHAs into a known format based on the input
@ -383,18 +377,12 @@ func replaceRegistryInImageURLWithList(imageURL string, reg RegistryList) (strin
}

switch registryAndUser {
-case initRegistry.E2eRegistry:
-registryAndUser = reg.E2eRegistry
case initRegistry.GcRegistry:
registryAndUser = reg.GcRegistry
case initRegistry.SigStorageRegistry:
registryAndUser = reg.SigStorageRegistry
case initRegistry.PrivateRegistry:
registryAndUser = reg.PrivateRegistry
-case initRegistry.SampleRegistry:
-registryAndUser = reg.SampleRegistry
-case initRegistry.GcrReleaseRegistry:
-registryAndUser = reg.GcrReleaseRegistry
case initRegistry.InvalidRegistry:
registryAndUser = reg.InvalidRegistry
case initRegistry.MicrosoftRegistry:
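Tests don't hard-code these tags; they resolve images through this package, so the version bumps above propagate to every caller at once. For example, assuming the default registry list (which can be overridden at runtime via KUBE_TEST_REPO_LIST):

package main

import (
	"fmt"

	imageutils "k8s.io/kubernetes/test/utils/image"
)

func main() {
	// With the defaults above this resolves to
	// k8s.gcr.io/e2e-test-images/agnhost:2.33.
	fmt.Println(imageutils.GetE2EImage(imageutils.Agnhost))
}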
19 vendor/k8s.io/kubernetes/test/utils/runners.go generated vendored
@ -181,6 +181,9 @@ type RCConfig struct {
ConfigMapNames []string

ServiceAccountTokenProjections int
+
+//Additional containers to run in the pod
+AdditionalContainers []v1.Container
}

func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@ -343,6 +346,10 @@ func (config *DeploymentConfig) create() error {
},
}

+if len(config.AdditionalContainers) > 0 {
+deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, config.AdditionalContainers...)
+}
+
if len(config.SecretNames) > 0 {
attachSecrets(&deployment.Spec.Template, config.SecretNames)
}
@ -425,6 +432,10 @@ func (config *ReplicaSetConfig) create() error {
},
}

+if len(config.AdditionalContainers) > 0 {
+rs.Spec.Template.Spec.Containers = append(rs.Spec.Template.Spec.Containers, config.AdditionalContainers...)
+}
+
if len(config.SecretNames) > 0 {
attachSecrets(&rs.Spec.Template, config.SecretNames)
}
@ -618,6 +629,10 @@ func (config *RCConfig) create() error {
},
}

+if len(config.AdditionalContainers) > 0 {
+rc.Spec.Template.Spec.Containers = append(rc.Spec.Template.Spec.Containers, config.AdditionalContainers...)
+}
+
if len(config.SecretNames) > 0 {
attachSecrets(rc.Spec.Template, config.SecretNames)
}
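The new AdditionalContainers field lets load-test configs attach sidecars to every pod the runner creates; the same append is wired into the Deployment, ReplicaSet, and RC paths above. A sketch of opting in (the helper and field values here are illustrative; only fields shown in this diff or long-standing RCConfig basics are used):

package main

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/utils"
)

// newLoadConfig builds an RCConfig whose pods carry an extra sidecar; the
// create() paths above append AdditionalContainers to each pod template.
func newLoadConfig(c clientset.Interface) testutils.RCConfig {
	return testutils.RCConfig{
		Client:    c,
		Name:      "load-rc",
		Namespace: "default",
		Image:     "k8s.gcr.io/pause:3.6",
		Replicas:  3,
		AdditionalContainers: []v1.Container{{
			Name:  "sidecar",
			Image: "k8s.gcr.io/e2e-test-images/agnhost:2.33",
		}},
	}
}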
@ -1304,7 +1319,7 @@ func MakePodSpec() v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
-Image: "k8s.gcr.io/pause:3.5",
+Image: "k8s.gcr.io/pause:3.6",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
@ -1726,7 +1741,7 @@ type DaemonConfig struct {

func (config *DaemonConfig) Run() error {
if config.Image == "" {
-config.Image = "k8s.gcr.io/pause:3.5"
+config.Image = "k8s.gcr.io/pause:3.6"
}
nameLabel := map[string]string{
"name": config.Name + "-daemon",