rebase: update kubernetes to latest

Update the Kubernetes release to the latest
version in the main go.mod.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna
2024-08-19 10:01:33 +02:00
committed by mergify[bot]
parent 63c4c05b35
commit 5a66991bb3
2173 changed files with 98906 additions and 61334 deletions


@ -236,9 +236,7 @@ type StatefulSetSpec struct {
// ordinals controls the numbering of replica indices in a StatefulSet. The
// default ordinals behavior assigns a "0" index to the first replica and
// increments the index by one for each additional replica requested. Using
// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
// enabled, which is beta.
// increments the index by one for each additional replica requested.
// +optional
Ordinals *StatefulSetOrdinals
}
@ -331,7 +329,7 @@ type ControllerRevision struct {
metav1.ObjectMeta
// Data is the Object representing the state.
Data runtime.Object
Data runtime.RawExtension
// Revision indicates the revision of the state represented by Data.
Revision int64


@ -33,9 +33,7 @@ func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Data != nil {
out.Data = in.Data.DeepCopyObject()
}
in.Data.DeepCopyInto(&out.Data)
return
}


@ -330,8 +330,6 @@ type JobSpec struct {
// checked against the backoffLimit. This field cannot be used in combination
// with .spec.podTemplate.spec.restartPolicy=OnFailure.
//
// This field is beta-level. It can be used when the `JobPodFailurePolicy`
// feature gate is enabled (enabled by default).
// +optional
PodFailurePolicy *PodFailurePolicy
@ -341,8 +339,8 @@ type JobSpec struct {
// When the field is specified, it must be immutable and works only for the Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
// This field is alpha-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (disabled by default).
// This field is beta-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (enabled by default).
// +optional
SuccessPolicy *SuccessPolicy
@ -476,7 +474,8 @@ type JobSpec struct {
// The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
// all characters before the first "/" must be a valid subdomain as defined
// by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
// characters as defined by RFC 3986. The value cannot exceed 64 characters.
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
//
// This field is alpha-level. The job controller accepts setting the field
// when the feature gate JobManagedBy is enabled (disabled by default).
@ -536,7 +535,8 @@ type JobStatus struct {
// +optional
Terminating *int32
// The number of active pods which have a Ready condition.
// The number of active pods which have a Ready condition and are not
// terminating (without a deletionTimestamp).
// +optional
Ready *int32


@ -15,6 +15,7 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=
// Package core contains the latest (or "internal") version of the
// Kubernetes API objects. This is the API objects as represented in memory.


@ -184,6 +184,23 @@ type VolumeSource struct {
//
// +optional
Ephemeral *EphemeralVolumeSource
// Image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
// The volume is resolved at pod startup depending on which PullPolicy value is provided:
//
// - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
// - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
// - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
//
// The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
// A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
// The volume will be mounted read-only (ro) and with non-executable files (noexec).
// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
// +featureGate=ImageVolume
// +optional
Image *ImageVolumeSource
}
// PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs.
@ -341,7 +358,7 @@ type PersistentVolumeSpec struct {
// after a volume has been updated successfully to a new class.
// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
// PersistentVolumeClaims during the binding process.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string
@ -392,8 +409,6 @@ type PersistentVolumeStatus struct {
Reason string
// LastPhaseTransitionTime is the time the phase transitioned from one to another
// and automatically resets to current time every time a volume phase transitions.
// This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).
// +featureGate=PersistentVolumeLastPhaseTransitionTime
// +optional
LastPhaseTransitionTime *metav1.Time
}
@ -509,7 +524,7 @@ type PersistentVolumeClaimSpec struct {
// set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
// exists.
// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
// (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
// +featureGate=VolumeAttributesClass
// +optional
VolumeAttributesClassName *string
@ -534,16 +549,28 @@ type TypedObjectReference struct {
}
// PersistentVolumeClaimConditionType defines the condition of PV claim.
// Valid values are either "Resizing" or "FileSystemResizePending".
// Valid values are:
// - "Resizing", "FileSystemResizePending"
//
// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected:
// - "ControllerResizeError", "NodeResizeError"
//
// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected:
// - "ModifyVolumeError", "ModifyingVolume"
type PersistentVolumeClaimConditionType string
// These are valid conditions of Pvc
// These are valid conditions of PVC
const (
// A user-triggered resize of the PVC has started
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
// PersistentVolumeClaimControllerResizeError indicates an error while resizing volume for size in the controller
PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError"
// PersistentVolumeClaimNodeResizeError indicates an error while resizing volume for size in the node.
PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError"
// Applying the target VolumeAttributesClass encountered an error
PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
// Volume is being modified
@ -560,18 +587,19 @@ const (
// State set when resize controller starts resizing the volume in control-plane
PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
// State set when resize has failed in resize controller with a terminal error.
// State set when resize has failed in resize controller with a terminal unrecoverable error.
// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
// unmodified, so that the resize controller can resume the volume expansion.
PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"
PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible"
// State set when resize controller has finished resizing the volume but further resizing of volume
// is needed on the node.
PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
// State set when kubelet starts resizing the volume.
PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
// State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed
PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
// State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors
// shouldn't set this status
PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible"
)
// +enum
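A minimal sketch of how a consumer might check for these terminal resize states, assuming the external k8s.io/api/core/v1 types (which mirror this internal API) and a PVC obtained from a client or informer elsewhere; the raw status strings are compared directly so the sketch does not depend on the exact exported constant names:

package example

import corev1 "k8s.io/api/core/v1"

// resizeInfeasible reports whether the PVC's storage resize has reached one of
// the terminal "infeasible" states described above.
func resizeInfeasible(pvc *corev1.PersistentVolumeClaim) bool {
	status, ok := pvc.Status.AllocatedResourceStatuses[corev1.ResourceStorage]
	if !ok {
		return false
	}
	return status == corev1.ClaimResourceStatus("ControllerResizeInfeasible") ||
		status == corev1.ClaimResourceStatus("NodeResizeInfeasible")
}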
@ -699,13 +727,13 @@ type PersistentVolumeClaimStatus struct {
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus
// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
// When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
CurrentVolumeAttributesClassName *string
// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
// When this is unset, there is no ModifyVolume operation being attempted.
// This is an alpha field and requires enabling VolumeAttributesClass feature.
// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
// +featureGate=VolumeAttributesClass
// +optional
ModifyVolumeStatus *ModifyVolumeStatus
@ -2441,6 +2469,13 @@ type ResourceClaim struct {
// the Pod where this field is used. It makes that resource available
// inside a container.
Name string
// Request is the name chosen for a request in the referenced claim.
// If empty, everything from the claim is made available, otherwise
// only the result of this request.
//
// +optional
Request string
}
// Container represents a single container that is expected to be run on the host.
@ -2741,6 +2776,85 @@ type ContainerStatus struct {
// +optional
// +featureGate=RecursiveReadOnlyMounts
VolumeMounts []VolumeMountStatus
// User represents user identity information initially attached to the first process of the container
// +featureGate=SupplementalGroupsPolicy
// +optional
User *ContainerUser
// AllocatedResourcesStatus represents the status of various resources
// allocated for this Pod.
// +featureGate=ResourceHealthStatus
// +optional
AllocatedResourcesStatus []ResourceStatus
}
type ResourceStatus struct {
Name ResourceName
// List of unique resource health entries. Each element in the list contains a unique resource ID and resource health.
// At a minimum, ResourceID must uniquely identify the Resource
// allocated to the Pod on the Node for the lifetime of a Pod.
// See the ResourceID type for its definition.
Resources []ResourceHealth
// allows extending this struct in the future with overall health fields or things like the Device Plugin version
}
// ResourceID is calculated based on the source of this resource health information.
// For DevicePlugin:
//
// deviceplugin:DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
//
// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
// For DRA:
//
// dra:<driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
type ResourceID string
type ResourceHealthStatus string
const (
ResourceHealthStatusHealthy ResourceHealthStatus = "Healthy"
ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy"
ResourceHealthStatusUnknown ResourceHealthStatus = "Unknown"
)
// ResourceHealth represents the health of a resource. It has the latest device health information.
// This is a part of KEP https://kep.k8s.io/4680 and historical health changes are planned to be added in future iterations of a KEP.
type ResourceHealth struct {
// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
ResourceID ResourceID
// Health of the resource.
// can be one of:
// - Healthy: operates as normal
// - Unhealthy: reported unhealthy. We consider this a temporary health issue
// since we do not have a mechanism today to distinguish
// temporary and permanent issues.
// - Unknown: The status cannot be determined.
// For example, Device Plugin got unregistered and hasn't been re-registered since.
//
// In future we may want to introduce the PermanentlyUnhealthy Status.
Health ResourceHealthStatus
}
// ContainerUser represents user identity information
type ContainerUser struct {
// Linux holds user identity information initially attached to the first process of the containers in Linux.
// Note that the actual running identity can be changed if the process has enough privilege to do so.
// +optional
Linux *LinuxContainerUser
// Windows holds user identity information of the first process of the containers in Windows
// This is just reserved for future use.
// Windows *WindowsContainerUser
}
// LinuxContainerUser represents user identity information in Linux containers
type LinuxContainerUser struct {
// UID is the primary uid initially attached to the first process in the container
UID int64
// GID is the primary gid initially attached to the first process in the container
GID int64
// SupplementalGroups are the supplemental groups initially attached to the first process in the container
SupplementalGroups []int64
}
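A hedged sketch of reading these new status fields through the external k8s.io/api/core/v1 types; whether they are populated depends on the SupplementalGroupsPolicy and ResourceHealthStatus feature gates and on runtime support:

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// dumpContainerStatusExtras prints the user identity and per-resource health
// reported for each container, when the kubelet populates these fields.
func dumpContainerStatusExtras(pod *corev1.Pod) {
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.User != nil && cs.User.Linux != nil {
			fmt.Printf("%s runs as uid=%d gid=%d groups=%v\n",
				cs.Name, cs.User.Linux.UID, cs.User.Linux.GID, cs.User.Linux.SupplementalGroups)
		}
		for _, rs := range cs.AllocatedResourcesStatus {
			for _, rh := range rs.Resources {
				// ResourceID is "deviceplugin:<id>" or "dra:<driver>/<pool>/<device>".
				fmt.Printf("%s %s -> %s\n", rs.Name, rh.ResourceID, rh.Health)
			}
		}
	}
}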
// PodPhase is a label for the condition of a pod at the current time.
@ -3083,19 +3197,27 @@ type PodAffinityTerm struct {
NamespaceSelector *metav1.LabelSelector
// MatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
// incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
// Also, matchLabelKeys cannot be set when labelSelector isn't set.
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
MatchLabelKeys []string
// MismatchLabelKeys is a set of pod label keys to select which pods will
// be taken into consideration. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
// incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
// to select the group of existing pods which pods will be taken into consideration
// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
// pod labels will be ignored. The default value is empty.
// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
//
// +listType=atomic
// +optional
MismatchLabelKeys []string
@ -3277,9 +3399,11 @@ type PodSpec struct {
// +optional
AutomountServiceAccountToken *bool
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
// requirements.
// NodeName indicates in which node this pod is scheduled.
// If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
// Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
// This field should not be used to express a desire for the pod to be scheduled on a specific node.
// https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
// +optional
NodeName string
// SecurityContext holds pod-level security attributes and common container settings.
@ -3395,6 +3519,7 @@ type PodSpec struct {
// - spec.securityContext.runAsUser
// - spec.securityContext.runAsGroup
// - spec.securityContext.supplementalGroups
// - spec.securityContext.supplementalGroupsPolicy
// - spec.containers[*].securityContext.appArmorProfile
// - spec.containers[*].securityContext.seLinuxOptions
// - spec.containers[*].securityContext.seccompProfile
@ -3439,17 +3564,11 @@ type PodResourceClaim struct {
// This must be a DNS_LABEL.
Name string
// Source describes where to find the ResourceClaim.
Source ClaimSource
}
// ClaimSource describes a reference to a ResourceClaim.
//
// Exactly one of these fields should be set. Consumers of this type must
// treat an empty object as if it has an unknown value.
type ClaimSource struct {
// ResourceClaimName is the name of a ResourceClaim object in the same
// namespace as this pod.
//
// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
// be set.
ResourceClaimName *string
// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
@ -3464,6 +3583,9 @@ type ClaimSource struct {
// This field is immutable and no changes will be made to the
// corresponding ResourceClaim by the control plane after creating the
// ResourceClaim.
//
// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
// be set.
ResourceClaimTemplateName *string
}
@ -3477,7 +3599,7 @@ type PodResourceClaimStatus struct {
Name string
// ResourceClaimName is the name of the ResourceClaim that was
// generated for the Pod in the namespace of the Pod. It this is
// generated for the Pod in the namespace of the Pod. If this is
// unset, then generating a ResourceClaim was not necessary. The
// pod.spec.resourceClaims entry can be ignored in this case.
ResourceClaimName *string
@ -3539,6 +3661,22 @@ const (
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
)
// SupplementalGroupsPolicy defines how supplemental groups
// of the first container processes are calculated.
type SupplementalGroupsPolicy string
const (
// SupplementalGroupsPolicyMerge means that the container's provided
// SupplementalGroups and FsGroup (specified in SecurityContext) will be
// merged with the primary user's groups as defined in the container image
// (in /etc/group).
SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge"
// SupplementalGroupsPolicyStrict means that the container's provided
// SupplementalGroups and FsGroup (specified in SecurityContext) will be
// used instead of any groups defined in the container image.
SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
)
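For illustration, a pod-level security context opting into the Strict policy might look like the following using the external k8s.io/api/core/v1 types; it only takes effect when the SupplementalGroupsPolicy feature gate is enabled and the container runtime supports it, and the group IDs are placeholders:

package example

import corev1 "k8s.io/api/core/v1"

// strictGroupsSecurityContext builds a PodSecurityContext in which only the
// listed supplemental groups apply, ignoring groups from /etc/group in the image.
func strictGroupsSecurityContext() *corev1.PodSecurityContext {
	policy := corev1.SupplementalGroupsPolicyStrict
	return &corev1.PodSecurityContext{
		SupplementalGroups:       []int64{1000, 2000},
		SupplementalGroupsPolicy: &policy,
	}
}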
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
@ -3621,15 +3759,26 @@ type PodSecurityContext struct {
// for that container.
// +optional
RunAsNonRoot *bool
// A list of groups applied to the first process run in each container, in addition
// to the container's primary GID, the fsGroup (if specified), and group memberships
// defined in the container image for the uid of the container process. If unspecified,
// no additional groups are added to any container. Note that group memberships
// defined in the container image for the uid of the container process are still effective,
// even if they are not included in this list.
// A list of groups applied to the first process run in each container, in
// addition to the container's primary GID and fsGroup (if specified). If
// the SupplementalGroupsPolicy feature is enabled, the
// supplementalGroupsPolicy field determines whether these are in addition
// to or instead of any group memberships defined in the container image.
// If unspecified, no additional groups are added, though group memberships
// defined in the container image may still be used, depending on the
// supplementalGroupsPolicy field.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
SupplementalGroups []int64
// Defines how supplemental groups of the first container processes are calculated.
// Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
// (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
// and the container runtime must implement support for this feature.
// Note that this field cannot be set when spec.os.name is windows.
// TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
// +featureGate=SupplementalGroupsPolicy
// +optional
SupplementalGroupsPolicy *SupplementalGroupsPolicy
// A special supplemental group that applies to all containers in a pod.
// Some volume types allow the Kubelet to change the ownership of that volume
// to be owned by the pod:
@ -4833,13 +4982,16 @@ type NodeDaemonEndpoints struct {
KubeletEndpoint DaemonEndpoint
}
// NodeRuntimeHandlerFeatures is a set of runtime features.
// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
type NodeRuntimeHandlerFeatures struct {
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
// +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnlyMounts *bool
// Reserved: UserNamespaces *bool
// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
// +featureGate=UserNamespacesSupport
// +optional
UserNamespaces *bool
}
// NodeRuntimeHandler is a set of runtime handler information.
@ -4853,6 +5005,15 @@ type NodeRuntimeHandler struct {
Features *NodeRuntimeHandlerFeatures
}
// NodeFeatures describes the set of features implemented by the CRI implementation.
// The features contained in NodeFeatures should depend only on the CRI implementation,
// independent of runtime handlers.
type NodeFeatures struct {
// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
// +optional
SupplementalGroupsPolicy *bool
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
@ -4965,8 +5126,13 @@ type NodeStatus struct {
Config *NodeConfigStatus
// The available runtime handlers.
// +featureGate=RecursiveReadOnlyMounts
// +featureGate=UserNamespacesSupport
// +optional
RuntimeHandlers []NodeRuntimeHandler
// Features describes the set of features implemented by the CRI implementation.
// +featureGate=SupplementalGroupsPolicy
// +optional
Features *NodeFeatures
}
// UniqueVolumeName defines the name of attached volume
@ -5700,6 +5866,8 @@ const (
ResourceLimitsMemory ResourceName = "limits.memory"
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
// resource.k8s.io devices requested with a certain DeviceClass, number
ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
)
// The following identify resource prefix for Kubernetes object types
@ -6139,7 +6307,7 @@ type SecurityContext struct {
// +optional
AllowPrivilegeEscalation *bool
// ProcMount denotes the type of proc mount to use for the containers.
// The default is DefaultProcMount which uses the container runtime defaults for
// The default value is Default which uses the container runtime defaults for
// readonly paths and masked paths.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
@ -6432,3 +6600,23 @@ const (
// the destination set to the node's IP and port or the pod's IP and port.
LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
)
// ImageVolumeSource represents an image volume resource.
type ImageVolumeSource struct {
// Required: Image or artifact reference to be used.
// Behaves in the same way as pod.spec.containers[*].image.
// Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
// More info: https://kubernetes.io/docs/concepts/containers/images
// This field is optional to allow higher level config management to default or override
// container images in workload controllers like Deployments and StatefulSets.
// +optional
Reference string
// Policy for pulling OCI objects. Possible values are:
// Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
// Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
// IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
// +optional
PullPolicy PullPolicy
}
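A usage sketch, assuming the ImageVolume feature gate is enabled and using the external k8s.io/api/core/v1 types; the image references and mount path are illustrative only. PullPolicy is left empty so the defaulting described above applies (Always for :latest, IfNotPresent otherwise):

package example

import corev1 "k8s.io/api/core/v1"

// imageVolumePod returns a minimal pod that mounts an OCI artifact as a
// read-only volume.
func imageVolumePod() *corev1.Pod {
	return &corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.example.com/app:1.0",
				VolumeMounts: []corev1.VolumeMount{{
					Name:      "models",
					MountPath: "/models",
				}},
			}},
			Volumes: []corev1.Volume{{
				Name: "models",
				VolumeSource: corev1.VolumeSource{
					Image: &corev1.ImageVolumeSource{
						Reference: "registry.example.com/models:latest",
					},
				},
			}},
		},
	}
}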


@ -97,6 +97,9 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
if err := AddFieldLabelConversionsForSecret(scheme); err != nil {
return err
}
if err := AddFieldLabelConversionsForService(scheme); err != nil {
return err
}
return nil
}
@ -488,6 +491,21 @@ func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error {
})
}
func AddFieldLabelConversionsForService(scheme *runtime.Scheme) error {
return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Service"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.namespace",
"metadata.name",
"spec.clusterIP",
"spec.type":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported: %s", label)
}
})
}
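The conversion registered above is what allows clients to filter Services server-side on these fields; a hedged client-go sketch, with the namespace and selector value purely illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// loadBalancerServices lists Services of type LoadBalancer in a namespace
// using the spec.type field selector enabled by the conversion above.
func loadBalancerServices(ctx context.Context, cs kubernetes.Interface, ns string) ([]corev1.Service, error) {
	list, err := cs.CoreV1().Services(ns).List(ctx, metav1.ListOptions{
		FieldSelector: "spec.type=LoadBalancer",
	})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}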
var initContainerAnnotations = map[string]bool{
"pod.beta.kubernetes.io/init-containers": true,
"pod.alpha.kubernetes.io/init-containers": true,
@ -525,32 +543,14 @@ func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]s
return newAnnotations
}
// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is defined outside the autogenerated file for use by other API packages
func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error {
return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s)
}
// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is defined outside the autogenerated file for use by other API packages
func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error {
return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s)
}
// Convert_core_Volume_To_v1_Volume is defined outside the autogenerated file for use by other API packages
func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error {
return autoConvert_core_Volume_To_v1_Volume(in, out, s)
}
// Convert_v1_Volume_To_core_Volume is defined outside the autogenerated file for use by other API packages
func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error {
return autoConvert_v1_Volume_To_core_Volume(in, out, s)
}
// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error {
return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s)
}
// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is defined outside the autogenerated file for use by other API packages
// This is needed because it is referenced from other APIs, but is invisible at code-generation time because of the build tags.
func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error {
return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s)
}


@ -69,6 +69,15 @@ func SetDefaults_Volume(obj *v1.Volume) {
EmptyDir: &v1.EmptyDirVolumeSource{},
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume) && obj.Image != nil && obj.Image.PullPolicy == "" {
// PullPolicy defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
_, tag, _, _ := parsers.ParseImageName(obj.Image.Reference)
if tag == "latest" {
obj.Image.PullPolicy = v1.PullAlways
} else {
obj.Image.PullPolicy = v1.PullIfNotPresent
}
}
}
func SetDefaults_Container(obj *v1.Container) {
if obj.ImagePullPolicy == "" {
@ -228,12 +237,6 @@ func SetDefaults_PodSpec(obj *v1.PodSpec) {
if obj.RestartPolicy == "" {
obj.RestartPolicy = v1.RestartPolicyAlways
}
if utilfeature.DefaultFeatureGate.Enabled(features.DefaultHostNetworkHostPortsInPodTemplates) {
if obj.HostNetwork {
defaultHostNetworkPorts(&obj.Containers)
defaultHostNetworkPorts(&obj.InitContainers)
}
}
if obj.SecurityContext == nil {
obj.SecurityContext = &v1.PodSecurityContext{}
}
@ -317,34 +320,6 @@ func SetDefaults_PersistentVolumeClaimSpec(obj *v1.PersistentVolumeClaimSpec) {
*obj.VolumeMode = v1.PersistentVolumeFilesystem
}
}
func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) {
if obj.ISCSIInterface == "" {
obj.ISCSIInterface = "default"
}
}
func SetDefaults_ISCSIPersistentVolumeSource(obj *v1.ISCSIPersistentVolumeSource) {
if obj.ISCSIInterface == "" {
obj.ISCSIInterface = "default"
}
}
func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) {
if obj.CachingMode == nil {
obj.CachingMode = new(v1.AzureDataDiskCachingMode)
*obj.CachingMode = v1.AzureDataDiskCachingReadWrite
}
if obj.Kind == nil {
obj.Kind = new(v1.AzureDataDiskKind)
*obj.Kind = v1.AzureSharedBlobDisk
}
if obj.FSType == nil {
obj.FSType = new(string)
*obj.FSType = "ext4"
}
if obj.ReadOnly == nil {
obj.ReadOnly = new(bool)
*obj.ReadOnly = false
}
}
func SetDefaults_Endpoints(obj *v1.Endpoints) {
for i := range obj.Subsets {
ss := &obj.Subsets[i]
@ -448,48 +423,6 @@ func defaultHostNetworkPorts(containers *[]v1.Container) {
}
}
func SetDefaults_RBDVolumeSource(obj *v1.RBDVolumeSource) {
if obj.RBDPool == "" {
obj.RBDPool = "rbd"
}
if obj.RadosUser == "" {
obj.RadosUser = "admin"
}
if obj.Keyring == "" {
obj.Keyring = "/etc/ceph/keyring"
}
}
func SetDefaults_RBDPersistentVolumeSource(obj *v1.RBDPersistentVolumeSource) {
if obj.RBDPool == "" {
obj.RBDPool = "rbd"
}
if obj.RadosUser == "" {
obj.RadosUser = "admin"
}
if obj.Keyring == "" {
obj.Keyring = "/etc/ceph/keyring"
}
}
func SetDefaults_ScaleIOVolumeSource(obj *v1.ScaleIOVolumeSource) {
if obj.StorageMode == "" {
obj.StorageMode = "ThinProvisioned"
}
if obj.FSType == "" {
obj.FSType = "xfs"
}
}
func SetDefaults_ScaleIOPersistentVolumeSource(obj *v1.ScaleIOPersistentVolumeSource) {
if obj.StorageMode == "" {
obj.StorageMode = "ThinProvisioned"
}
if obj.FSType == "" {
obj.FSType = "xfs"
}
}
func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) {
typeVol := v1.HostPathUnset
if obj.Type == nil {


@ -140,35 +140,6 @@ func IsServiceIPSet(service *v1.Service) bool {
return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != ""
}
// LoadBalancerStatusEqual evaluates the given load balancers' ingress IP addresses
// and hostnames and returns true if equal or false if otherwise
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool {
return ingressSliceEqual(l.Ingress, r.Ingress)
}
func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool {
if len(lhs) != len(rhs) {
return false
}
for i := range lhs {
if !ingressEqual(&lhs[i], &rhs[i]) {
return false
}
}
return true
}
func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool {
if lhs.IP != rhs.IP {
return false
}
if lhs.Hostname != rhs.Hostname {
return false
}
return true
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX,RWOP.
func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string {


@ -202,16 +202,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClaimSource)(nil), (*core.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClaimSource_To_core_ClaimSource(a.(*v1.ClaimSource), b.(*core.ClaimSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ClaimSource)(nil), (*v1.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ClaimSource_To_v1_ClaimSource(a.(*core.ClaimSource), b.(*v1.ClaimSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*v1.ClientIPConfig), b.(*core.ClientIPConfig), scope)
}); err != nil {
@ -422,6 +412,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ContainerUser)(nil), (*core.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerUser_To_core_ContainerUser(a.(*v1.ContainerUser), b.(*core.ContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerUser)(nil), (*v1.ContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerUser_To_v1_ContainerUser(a.(*core.ContainerUser), b.(*v1.ContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.DaemonEndpoint)(nil), (*core.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(a.(*v1.DaemonEndpoint), b.(*core.DaemonEndpoint), scope)
}); err != nil {
@ -792,6 +792,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ImageVolumeSource)(nil), (*core.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(a.(*v1.ImageVolumeSource), b.(*core.ImageVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ImageVolumeSource)(nil), (*v1.ImageVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(a.(*core.ImageVolumeSource), b.(*v1.ImageVolumeSource), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.KeyToPath)(nil), (*core.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_KeyToPath_To_core_KeyToPath(a.(*v1.KeyToPath), b.(*core.KeyToPath), scope)
}); err != nil {
@ -862,6 +872,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.LinuxContainerUser)(nil), (*core.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(a.(*v1.LinuxContainerUser), b.(*core.LinuxContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LinuxContainerUser)(nil), (*v1.LinuxContainerUser)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(a.(*core.LinuxContainerUser), b.(*v1.LinuxContainerUser), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*core.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_List_To_core_List(a.(*v1.List), b.(*core.List), scope)
}); err != nil {
@ -882,6 +902,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*v1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.LoadBalancerStatus)(nil), (*v1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*v1.LoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.LocalObjectReference)(nil), (*core.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LocalObjectReference_To_core_LocalObjectReference(a.(*v1.LocalObjectReference), b.(*core.LocalObjectReference), scope)
}); err != nil {
@ -1042,6 +1072,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.NodeFeatures)(nil), (*core.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeFeatures_To_core_NodeFeatures(a.(*v1.NodeFeatures), b.(*core.NodeFeatures), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.NodeFeatures)(nil), (*v1.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeFeatures_To_v1_NodeFeatures(a.(*core.NodeFeatures), b.(*v1.NodeFeatures), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.NodeList)(nil), (*core.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeList_To_core_NodeList(a.(*v1.NodeList), b.(*core.NodeList), scope)
}); err != nil {
@ -1682,6 +1722,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceHealth)(nil), (*core.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceHealth_To_core_ResourceHealth(a.(*v1.ResourceHealth), b.(*core.ResourceHealth), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceHealth)(nil), (*v1.ResourceHealth)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceHealth_To_v1_ResourceHealth(a.(*core.ResourceHealth), b.(*v1.ResourceHealth), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceQuota)(nil), (*core.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceQuota_To_core_ResourceQuota(a.(*v1.ResourceQuota), b.(*core.ResourceQuota), scope)
}); err != nil {
@ -1732,6 +1782,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ResourceStatus)(nil), (*core.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ResourceStatus_To_core_ResourceStatus(a.(*v1.ResourceStatus), b.(*core.ResourceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ResourceStatus)(nil), (*v1.ResourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ResourceStatus_To_v1_ResourceStatus(a.(*core.ResourceStatus), b.(*v1.ResourceStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.SELinuxOptions)(nil), (*core.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SELinuxOptions_To_core_SELinuxOptions(a.(*v1.SELinuxOptions), b.(*core.SELinuxOptions), scope)
}); err != nil {
@ -2097,6 +2157,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Volume_To_core_Volume(a.(*v1.Volume), b.(*core.Volume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*v1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*v1.Volume), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*v1.VolumeDevice), b.(*core.VolumeDevice), scope)
}); err != nil {
@ -2247,11 +2317,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.LoadBalancerStatus)(nil), (*v1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*v1.LoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.NodeSpec)(nil), (*v1.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*v1.NodeSpec), scope)
}); err != nil {
@ -2287,16 +2352,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*core.Volume)(nil), (*v1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*v1.Volume), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*v1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*v1.NodeSpec), b.(*core.NodeSpec), scope)
}); err != nil {
@ -2357,11 +2412,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_Volume_To_core_Volume(a.(*v1.Volume), b.(*core.Volume), scope)
}); err != nil {
return err
}
return nil
}
@ -2785,28 +2835,6 @@ func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVol
return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
}
func autoConvert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_v1_ClaimSource_To_core_ClaimSource is an autogenerated conversion function.
func Convert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
return autoConvert_v1_ClaimSource_To_core_ClaimSource(in, out, s)
}
func autoConvert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
// Convert_core_ClaimSource_To_v1_ClaimSource is an autogenerated conversion function.
func Convert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
return autoConvert_core_ClaimSource_To_v1_ClaimSource(in, out, s)
}
func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
return nil
@ -3370,6 +3398,8 @@ func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStat
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources))
out.VolumeMounts = *(*[]core.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts))
out.User = (*core.ContainerUser)(unsafe.Pointer(in.User))
out.AllocatedResourcesStatus = *(*[]core.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus))
return nil
}
@ -3395,6 +3425,8 @@ func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerSt
out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*v1.ResourceRequirements)(unsafe.Pointer(in.Resources))
out.VolumeMounts = *(*[]v1.VolumeMountStatus)(unsafe.Pointer(&in.VolumeMounts))
out.User = (*v1.ContainerUser)(unsafe.Pointer(in.User))
out.AllocatedResourcesStatus = *(*[]v1.ResourceStatus)(unsafe.Pointer(&in.AllocatedResourcesStatus))
return nil
}
@ -3403,6 +3435,26 @@ func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus
return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s)
}
func autoConvert_v1_ContainerUser_To_core_ContainerUser(in *v1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error {
out.Linux = (*core.LinuxContainerUser)(unsafe.Pointer(in.Linux))
return nil
}
// Convert_v1_ContainerUser_To_core_ContainerUser is an autogenerated conversion function.
func Convert_v1_ContainerUser_To_core_ContainerUser(in *v1.ContainerUser, out *core.ContainerUser, s conversion.Scope) error {
return autoConvert_v1_ContainerUser_To_core_ContainerUser(in, out, s)
}
func autoConvert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *v1.ContainerUser, s conversion.Scope) error {
out.Linux = (*v1.LinuxContainerUser)(unsafe.Pointer(in.Linux))
return nil
}
// Convert_core_ContainerUser_To_v1_ContainerUser is an autogenerated conversion function.
func Convert_core_ContainerUser_To_v1_ContainerUser(in *core.ContainerUser, out *v1.ContainerUser, s conversion.Scope) error {
return autoConvert_core_ContainerUser_To_v1_ContainerUser(in, out, s)
}
func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error {
out.Port = in.Port
return nil
@ -4387,6 +4439,28 @@ func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolume
return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s)
}
func autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *v1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error {
out.Reference = in.Reference
out.PullPolicy = core.PullPolicy(in.PullPolicy)
return nil
}
// Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource is an autogenerated conversion function.
func Convert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in *v1.ImageVolumeSource, out *core.ImageVolumeSource, s conversion.Scope) error {
return autoConvert_v1_ImageVolumeSource_To_core_ImageVolumeSource(in, out, s)
}
func autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *v1.ImageVolumeSource, s conversion.Scope) error {
out.Reference = in.Reference
out.PullPolicy = v1.PullPolicy(in.PullPolicy)
return nil
}
// Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource is an autogenerated conversion function.
func Convert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in *core.ImageVolumeSource, out *v1.ImageVolumeSource, s conversion.Scope) error {
return autoConvert_core_ImageVolumeSource_To_v1_ImageVolumeSource(in, out, s)
}
func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error {
out.Key = in.Key
out.Path = in.Path
@ -4557,6 +4631,30 @@ func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, o
return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s)
}
func autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *v1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error {
out.UID = in.UID
out.GID = in.GID
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
return nil
}
// Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser is an autogenerated conversion function.
func Convert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in *v1.LinuxContainerUser, out *core.LinuxContainerUser, s conversion.Scope) error {
return autoConvert_v1_LinuxContainerUser_To_core_LinuxContainerUser(in, out, s)
}
func autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *v1.LinuxContainerUser, s conversion.Scope) error {
out.UID = in.UID
out.GID = in.GID
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
return nil
}
// Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser is an autogenerated conversion function.
func Convert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in *core.LinuxContainerUser, out *v1.LinuxContainerUser, s conversion.Scope) error {
return autoConvert_core_LinuxContainerUser_To_v1_LinuxContainerUser(in, out, s)
}
func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
@ -4630,11 +4728,21 @@ func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBal
return nil
}
// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function.
func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error {
return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s)
}
func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error {
out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress))
return nil
}
// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function.
func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error {
return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s)
}
func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error {
out.Name = in.Name
return nil
@ -5023,6 +5131,26 @@ func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDae
return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s)
}
func autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in *v1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error {
out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy))
return nil
}
// Convert_v1_NodeFeatures_To_core_NodeFeatures is an autogenerated conversion function.
func Convert_v1_NodeFeatures_To_core_NodeFeatures(in *v1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error {
return autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in, out, s)
}
func autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *v1.NodeFeatures, s conversion.Scope) error {
out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy))
return nil
}
// Convert_core_NodeFeatures_To_v1_NodeFeatures is an autogenerated conversion function.
func Convert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *v1.NodeFeatures, s conversion.Scope) error {
return autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in, out, s)
}
func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
if in.Items != nil {
@ -5127,6 +5255,7 @@ func Convert_core_NodeRuntimeHandler_To_v1_NodeRuntimeHandler(in *core.NodeRunti
func autoConvert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in *v1.NodeRuntimeHandlerFeatures, out *core.NodeRuntimeHandlerFeatures, s conversion.Scope) error {
out.RecursiveReadOnlyMounts = (*bool)(unsafe.Pointer(in.RecursiveReadOnlyMounts))
out.UserNamespaces = (*bool)(unsafe.Pointer(in.UserNamespaces))
return nil
}
@ -5137,6 +5266,7 @@ func Convert_v1_NodeRuntimeHandlerFeatures_To_core_NodeRuntimeHandlerFeatures(in
func autoConvert_core_NodeRuntimeHandlerFeatures_To_v1_NodeRuntimeHandlerFeatures(in *core.NodeRuntimeHandlerFeatures, out *v1.NodeRuntimeHandlerFeatures, s conversion.Scope) error {
out.RecursiveReadOnlyMounts = (*bool)(unsafe.Pointer(in.RecursiveReadOnlyMounts))
out.UserNamespaces = (*bool)(unsafe.Pointer(in.UserNamespaces))
return nil
}
@ -5249,6 +5379,7 @@ func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.N
out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
out.Config = (*core.NodeConfigStatus)(unsafe.Pointer(in.Config))
out.RuntimeHandlers = *(*[]core.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers))
out.Features = (*core.NodeFeatures)(unsafe.Pointer(in.Features))
return nil
}
@ -5274,6 +5405,7 @@ func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.N
out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached))
out.Config = (*v1.NodeConfigStatus)(unsafe.Pointer(in.Config))
out.RuntimeHandlers = *(*[]v1.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers))
out.Features = (*v1.NodeFeatures)(unsafe.Pointer(in.Features))
return nil
}
@ -6413,9 +6545,8 @@ func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessG
func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_v1_ClaimSource_To_core_ClaimSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
@ -6426,9 +6557,8 @@ func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClai
func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_core_ClaimSource_To_v1_ClaimSource(&in.Source, &out.Source, s); err != nil {
return err
}
out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
return nil
}
@ -6486,6 +6616,7 @@ func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecu
out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
out.SupplementalGroupsPolicy = (*core.SupplementalGroupsPolicy)(unsafe.Pointer(in.SupplementalGroupsPolicy))
out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup))
out.Sysctls = *(*[]core.Sysctl)(unsafe.Pointer(&in.Sysctls))
out.FSGroupChangePolicy = (*core.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy))
@ -6511,6 +6642,7 @@ func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSe
out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup))
out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot))
out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups))
out.SupplementalGroupsPolicy = (*v1.SupplementalGroupsPolicy)(unsafe.Pointer(in.SupplementalGroupsPolicy))
out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup))
out.FSGroupChangePolicy = (*v1.PodFSGroupChangePolicy)(unsafe.Pointer(in.FSGroupChangePolicy))
out.Sysctls = *(*[]v1.Sysctl)(unsafe.Pointer(&in.Sysctls))
@ -7329,6 +7461,7 @@ func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(
func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
out.Request = in.Request
return nil
}
@ -7339,6 +7472,7 @@ func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *c
func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
out.Name = in.Name
out.Request = in.Request
return nil
}
@ -7371,6 +7505,28 @@ func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.Res
return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s)
}
func autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in *v1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error {
out.ResourceID = core.ResourceID(in.ResourceID)
out.Health = core.ResourceHealthStatus(in.Health)
return nil
}
// Convert_v1_ResourceHealth_To_core_ResourceHealth is an autogenerated conversion function.
func Convert_v1_ResourceHealth_To_core_ResourceHealth(in *v1.ResourceHealth, out *core.ResourceHealth, s conversion.Scope) error {
return autoConvert_v1_ResourceHealth_To_core_ResourceHealth(in, out, s)
}
func autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *v1.ResourceHealth, s conversion.Scope) error {
out.ResourceID = v1.ResourceID(in.ResourceID)
out.Health = v1.ResourceHealthStatus(in.Health)
return nil
}
// Convert_core_ResourceHealth_To_v1_ResourceHealth is an autogenerated conversion function.
func Convert_core_ResourceHealth_To_v1_ResourceHealth(in *core.ResourceHealth, out *v1.ResourceHealth, s conversion.Scope) error {
return autoConvert_core_ResourceHealth_To_v1_ResourceHealth(in, out, s)
}
func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil {
@ -7495,6 +7651,28 @@ func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.Resou
return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s)
}
func autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in *v1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error {
out.Name = core.ResourceName(in.Name)
out.Resources = *(*[]core.ResourceHealth)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_v1_ResourceStatus_To_core_ResourceStatus is an autogenerated conversion function.
func Convert_v1_ResourceStatus_To_core_ResourceStatus(in *v1.ResourceStatus, out *core.ResourceStatus, s conversion.Scope) error {
return autoConvert_v1_ResourceStatus_To_core_ResourceStatus(in, out, s)
}
func autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *v1.ResourceStatus, s conversion.Scope) error {
out.Name = v1.ResourceName(in.Name)
out.Resources = *(*[]v1.ResourceHealth)(unsafe.Pointer(&in.Resources))
return nil
}
// Convert_core_ResourceStatus_To_v1_ResourceStatus is an autogenerated conversion function.
func Convert_core_ResourceStatus_To_v1_ResourceStatus(in *core.ResourceStatus, out *v1.ResourceStatus, s conversion.Scope) error {
return autoConvert_core_ResourceStatus_To_v1_ResourceStatus(in, out, s)
}
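A minimal sketch of the new allocated-resources health status as it could be populated through the public k8s.io/api/core/v1 types (assumed to mirror the internal ResourceStatus/ResourceHealth converted above); the resource name and device ID are placeholders:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
	)

	func main() {
		cs := v1.ContainerStatus{
			Name: "app",
			AllocatedResourcesStatus: []v1.ResourceStatus{{
				Name: v1.ResourceName("example.com/gpu"), // placeholder extended resource
				Resources: []v1.ResourceHealth{{
					ResourceID: "gpu-0", // placeholder device ID
					Health:     v1.ResourceHealthStatusHealthy,
				}},
			}},
		}
		fmt.Println(cs.AllocatedResourcesStatus[0].Resources[0].Health)
	}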
func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error {
out.User = in.User
out.Role = in.Role
@ -8553,6 +8731,11 @@ func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s con
return nil
}
// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function.
func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error {
return autoConvert_v1_Volume_To_core_Volume(in, out, s)
}
func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error {
out.Name = in.Name
if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {
@ -8561,6 +8744,11 @@ func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s con
return nil
}
// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function.
func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error {
return autoConvert_core_Volume_To_v1_Volume(in, out, s)
}
func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error {
out.Name = in.Name
out.DevicePath = in.DevicePath
@ -8765,6 +8953,7 @@ func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *
out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
out.CSI = (*core.CSIVolumeSource)(unsafe.Pointer(in.CSI))
out.Ephemeral = (*core.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
out.Image = (*core.ImageVolumeSource)(unsafe.Pointer(in.Image))
return nil
}
@ -8811,6 +9000,7 @@ func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out
out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
out.CSI = (*v1.CSIVolumeSource)(unsafe.Pointer(in.CSI))
out.Ephemeral = (*v1.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
out.Image = (*v1.ImageVolumeSource)(unsafe.Pointer(in.Image))
return nil
}

View File

@ -137,16 +137,46 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) {
SetDefaults_HostPathVolumeSource(in.Spec.PersistentVolumeSource.HostPath)
}
if in.Spec.PersistentVolumeSource.RBD != nil {
SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD)
if in.Spec.PersistentVolumeSource.RBD.RBDPool == "" {
in.Spec.PersistentVolumeSource.RBD.RBDPool = "rbd"
}
if in.Spec.PersistentVolumeSource.RBD.RadosUser == "" {
in.Spec.PersistentVolumeSource.RBD.RadosUser = "admin"
}
if in.Spec.PersistentVolumeSource.RBD.Keyring == "" {
in.Spec.PersistentVolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if in.Spec.PersistentVolumeSource.ISCSI != nil {
SetDefaults_ISCSIPersistentVolumeSource(in.Spec.PersistentVolumeSource.ISCSI)
if in.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface == "" {
in.Spec.PersistentVolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if in.Spec.PersistentVolumeSource.AzureDisk != nil {
SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk)
if in.Spec.PersistentVolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
in.Spec.PersistentVolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if in.Spec.PersistentVolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
in.Spec.PersistentVolumeSource.AzureDisk.FSType = &ptrVar1
}
if in.Spec.PersistentVolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
in.Spec.PersistentVolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if in.Spec.PersistentVolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
in.Spec.PersistentVolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if in.Spec.PersistentVolumeSource.ScaleIO != nil {
SetDefaults_ScaleIOPersistentVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO)
if in.Spec.PersistentVolumeSource.ScaleIO.StorageMode == "" {
in.Spec.PersistentVolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if in.Spec.PersistentVolumeSource.ScaleIO.FSType == "" {
in.Spec.PersistentVolumeSource.ScaleIO.FSType = "xfs"
}
}
}
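The generated defaulting above now inlines what the SetDefaults_* helpers used to do; a standalone sketch of the equivalent RBD defaulting, written against the public v1 types purely for illustration (monitor address and image name are placeholders):

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
	)

	// defaultRBD mirrors the inlined defaults for RBD persistent volumes shown above.
	func defaultRBD(rbd *v1.RBDPersistentVolumeSource) {
		if rbd.RBDPool == "" {
			rbd.RBDPool = "rbd"
		}
		if rbd.RadosUser == "" {
			rbd.RadosUser = "admin"
		}
		if rbd.Keyring == "" {
			rbd.Keyring = "/etc/ceph/keyring"
		}
	}

	func main() {
		rbd := &v1.RBDPersistentVolumeSource{CephMonitors: []string{"10.0.0.1:6789"}, RBDImage: "data"}
		defaultRBD(rbd)
		fmt.Println(rbd.RBDPool, rbd.RadosUser, rbd.Keyring) // rbd admin /etc/ceph/keyring
	}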
@ -186,10 +216,20 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
@ -204,7 +244,22 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
@ -224,7 +279,12 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
}
}
if a.VolumeSource.ScaleIO != nil {
SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
@ -511,10 +571,20 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
@ -529,7 +599,22 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
@ -549,7 +634,12 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
}
}
if a.VolumeSource.ScaleIO != nil {
SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
@ -787,10 +877,20 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
SetDefaults_SecretVolumeSource(a.VolumeSource.Secret)
}
if a.VolumeSource.ISCSI != nil {
SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI)
if a.VolumeSource.ISCSI.ISCSIInterface == "" {
a.VolumeSource.ISCSI.ISCSIInterface = "default"
}
}
if a.VolumeSource.RBD != nil {
SetDefaults_RBDVolumeSource(a.VolumeSource.RBD)
if a.VolumeSource.RBD.RBDPool == "" {
a.VolumeSource.RBD.RBDPool = "rbd"
}
if a.VolumeSource.RBD.RadosUser == "" {
a.VolumeSource.RBD.RadosUser = "admin"
}
if a.VolumeSource.RBD.Keyring == "" {
a.VolumeSource.RBD.Keyring = "/etc/ceph/keyring"
}
}
if a.VolumeSource.DownwardAPI != nil {
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI)
@ -805,7 +905,22 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap)
}
if a.VolumeSource.AzureDisk != nil {
SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk)
if a.VolumeSource.AzureDisk.CachingMode == nil {
ptrVar1 := v1.AzureDataDiskCachingMode(v1.AzureDataDiskCachingReadWrite)
a.VolumeSource.AzureDisk.CachingMode = &ptrVar1
}
if a.VolumeSource.AzureDisk.FSType == nil {
var ptrVar1 string = "ext4"
a.VolumeSource.AzureDisk.FSType = &ptrVar1
}
if a.VolumeSource.AzureDisk.ReadOnly == nil {
var ptrVar1 bool = false
a.VolumeSource.AzureDisk.ReadOnly = &ptrVar1
}
if a.VolumeSource.AzureDisk.Kind == nil {
ptrVar1 := v1.AzureDataDiskKind(v1.AzureSharedBlobDisk)
a.VolumeSource.AzureDisk.Kind = &ptrVar1
}
}
if a.VolumeSource.Projected != nil {
SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected)
@ -825,7 +940,12 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
}
}
if a.VolumeSource.ScaleIO != nil {
SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
if a.VolumeSource.ScaleIO.StorageMode == "" {
a.VolumeSource.ScaleIO.StorageMode = "ThinProvisioned"
}
if a.VolumeSource.ScaleIO.FSType == "" {
a.VolumeSource.ScaleIO.FSType = "xfs"
}
}
if a.VolumeSource.Ephemeral != nil {
if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {

View File

@ -732,6 +732,14 @@ func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volNam
}
}
}
if opts.AllowImageVolumeSource && source.Image != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("image"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateImageVolumeSource(source.Image, fldPath.Child("image"), opts)...)
}
}
if numVolumes == 0 {
allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
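A minimal sketch of the volume shape this new validation branch accepts, built with the public k8s.io/api/core/v1 types (assumed to mirror the internal ImageVolumeSource); the image reference is a placeholder:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
	)

	func main() {
		vol := v1.Volume{
			Name: "artifact",
			VolumeSource: v1.VolumeSource{
				Image: &v1.ImageVolumeSource{
					Reference:  "registry.example.com/data:v1", // placeholder OCI reference
					PullPolicy: v1.PullIfNotPresent,
				},
			},
		}
		fmt.Println(vol.Name, vol.Image.Reference)
	}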
@ -1157,7 +1165,7 @@ func validateProjectionSources(projection *core.ProjectedVolumeSource, projectio
allErrs = append(allErrs, ValidateLocalNonReservedPath(source.ServiceAccountToken.Path, fldPath.Child("path"))...)
}
}
if projPath := srcPath.Child("clusterTrustBundlePEM"); source.ClusterTrustBundle != nil {
if projPath := srcPath.Child("clusterTrustBundle"); source.ClusterTrustBundle != nil {
numSources++
usingName := source.ClusterTrustBundle.Name != nil
@ -1221,7 +1229,7 @@ func validateProjectionSources(projection *core.ProjectedVolumeSource, projectio
}
}
if numSources > 1 {
allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type"))
allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type per source"))
}
}
return allErrs
@ -2391,7 +2399,9 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl
newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] // +k8s:verify-mutation:reason=clone
}
// lets make sure volume attributes class name is same.
newPvcClone.Spec.VolumeAttributesClassName = oldPvcClone.Spec.VolumeAttributesClassName // +k8s:verify-mutation:reason=clone
if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.VolumeAttributesClassName != nil {
newPvcClone.Spec.VolumeAttributesClassName = oldPvcClone.Spec.VolumeAttributesClassName // +k8s:verify-mutation:reason=clone
}
oldSize := oldPvc.Spec.Resources.Requests["storage"]
newSize := newPvc.Spec.Resources.Requests["storage"]
@ -2481,10 +2491,10 @@ func validatePersistentVolumeClaimResourceKey(value string, fldPath *field.Path)
}
var resizeStatusSet = sets.New(core.PersistentVolumeClaimControllerResizeInProgress,
core.PersistentVolumeClaimControllerResizeFailed,
core.PersistentVolumeClaimControllerResizeInfeasible,
core.PersistentVolumeClaimNodeResizePending,
core.PersistentVolumeClaimNodeResizeInProgress,
core.PersistentVolumeClaimNodeResizeFailed)
core.PersistentVolumeClaimNodeResizeInfeasible)
// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, validationOpts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
@ -2892,7 +2902,7 @@ func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string {
return volDevices
}
func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList {
func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
mountpoints := sets.New[string]()
@ -2920,6 +2930,18 @@ func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]strin
allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices"))
}
// Disallow subPath/subPathExpr for image volumes
if opts.AllowImageVolumeSource {
if v, ok := volumes[mnt.Name]; ok && v.Image != nil {
if len(mnt.SubPath) != 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("subPath"), mnt.SubPath, "not allowed in image volume sources"))
}
if len(mnt.SubPathExpr) != 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("subPathExpr"), mnt.SubPathExpr, "not allowed in image volume sources"))
}
}
}
if len(mnt.SubPath) > 0 {
allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...)
}
@ -3017,44 +3039,29 @@ func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResource
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
nameErrs := ValidateDNS1123Label(claim.Name, fldPath.Child("name"))
if len(nameErrs) > 0 {
allErrs = append(allErrs, nameErrs...)
} else if podMeta != nil && claim.Source.ResourceClaimTemplateName != nil {
claimName := podMeta.Name + "-" + claim.Name
for _, detail := range ValidateResourceClaimName(claimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), claimName, "final ResourceClaim name: "+detail))
}
}
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
podClaimNames.Insert(claim.Name)
}
allErrs = append(allErrs, validatePodResourceClaimSource(claim.Source, fldPath.Child("source"))...)
return allErrs
}
func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claimSource.ResourceClaimName != nil && claimSource.ResourceClaimTemplateName != nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "at most one of `resourceClaimName` or `resourceClaimTemplateName` may be specified"))
if claim.ResourceClaimName != nil && claim.ResourceClaimTemplateName != nil {
allErrs = append(allErrs, field.Invalid(fldPath, claim, "at most one of `resourceClaimName` or `resourceClaimTemplateName` may be specified"))
}
if claimSource.ResourceClaimName == nil && claimSource.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
if claim.ResourceClaimName == nil && claim.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claim, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
if claimSource.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*claimSource.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimName"), *claimSource.ResourceClaimName, detail))
if claim.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*claim.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimName"), *claim.ResourceClaimName, detail))
}
}
if claimSource.ResourceClaimTemplateName != nil {
for _, detail := range ValidateResourceClaimTemplateName(*claimSource.ResourceClaimTemplateName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimTemplateName"), *claimSource.ResourceClaimTemplateName, detail))
if claim.ResourceClaimTemplateName != nil {
for _, detail := range ValidateResourceClaimTemplateName(*claim.ResourceClaimTemplateName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimTemplateName"), *claim.ResourceClaimTemplateName, detail))
}
}
return allErrs
}
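With ClaimSource gone, a pod-level resource claim now carries the claim reference directly; a minimal sketch using the public v1 types, where exactly one of resourceClaimName or resourceClaimTemplateName may be set (the template name is a placeholder):

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/utils/ptr"
	)

	func main() {
		claim := v1.PodResourceClaim{
			Name:                      "gpu",
			ResourceClaimTemplateName: ptr.To("gpu-template"), // placeholder template name
		}
		fmt.Println(claim.Name, *claim.ResourceClaimTemplateName)
	}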
func validateLivenessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateLivenessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
@ -3067,7 +3074,7 @@ func validateLivenessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.
return allErrs
}
func validateReadinessProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateReadinessProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
@ -3080,7 +3087,7 @@ func validateReadinessProbe(probe *core.Probe, gracePeriod int64, fldPath *field
return allErrs
}
func validateStartupProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateStartupProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
@ -3093,7 +3100,7 @@ func validateStartupProbe(probe *core.Probe, gracePeriod int64, fldPath *field.P
return allErrs
}
func validateProbe(probe *core.Probe, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateProbe(probe *core.Probe, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if probe == nil {
@ -3155,10 +3162,13 @@ func handlerFromLifecycle(lh *core.LifecycleHandler) commonHandler {
}
}
func validateSleepAction(sleep *core.SleepAction, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateSleepAction(sleep *core.SleepAction, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if sleep.Seconds <= 0 || sleep.Seconds > gracePeriod {
invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d)", gracePeriod)
// We allow gracePeriod to be nil here because the pod in which this SleepAction
// is defined might have an invalid grace period defined, and we don't want to
// flag another error here when the real problem will already be flagged.
if gracePeriod != nil && (sleep.Seconds <= 0 || sleep.Seconds > *gracePeriod) {
invalidStr := fmt.Sprintf("must be greater than 0 and less than terminationGracePeriodSeconds (%d)", *gracePeriod)
allErrors = append(allErrors, field.Invalid(fldPath, sleep.Seconds, invalidStr))
}
return allErrors
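A minimal sketch of a pod spec that satisfies this check, using the public v1 types; the sleep stays within the explicitly set termination grace period, and the image reference is a placeholder:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		"k8s.io/utils/ptr"
	)

	func main() {
		spec := v1.PodSpec{
			TerminationGracePeriodSeconds: ptr.To[int64](30),
			Containers: []v1.Container{{
				Name:  "app",
				Image: "registry.example.com/app:1.0", // placeholder image
				Lifecycle: &v1.Lifecycle{
					PreStop: &v1.LifecycleHandler{
						Sleep: &v1.SleepAction{Seconds: 5}, // > 0 and <= terminationGracePeriodSeconds
					},
				},
			}},
		}
		fmt.Println(spec.Containers[0].Lifecycle.PreStop.Sleep.Seconds)
	}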
@ -3272,7 +3282,7 @@ func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) fie
func validateGRPCAction(grpc *core.GRPCAction, fldPath *field.Path) field.ErrorList {
return ValidatePortNumOrName(intstr.FromInt32(grpc.Port), fldPath.Child("port"))
}
func validateHandler(handler commonHandler, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateHandler(handler commonHandler, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
numHandlers := 0
allErrors := field.ErrorList{}
if handler.Exec != nil {
@ -3321,7 +3331,7 @@ func validateHandler(handler commonHandler, gracePeriod int64, fldPath *field.Pa
return allErrors
}
func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod int64, fldPath *field.Path) field.ErrorList {
func validateLifecycle(lifecycle *core.Lifecycle, gracePeriod *int64, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if lifecycle.PostStart != nil {
allErrs = append(allErrs, validateHandler(handlerFromLifecycle(lifecycle.PostStart), gracePeriod, fldPath.Child("postStart"))...)
@ -3472,7 +3482,7 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er
}
// validateInitContainers is called by pod spec and template validation to validate the list of init containers
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
var allErrs field.ErrorList
allNames := sets.Set[string]{}
@ -3572,7 +3582,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
allErrs = append(allErrs, validateContainerPorts(ctr.Ports, path.Child("ports"))...)
allErrs = append(allErrs, ValidateEnv(ctr.Env, path.Child("env"), opts)...)
allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, path.Child("envFrom"), opts)...)
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"))...)
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"), opts)...)
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
@ -3610,7 +3620,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList
}
// validateContainers is called by pod spec and template validation to validate the list of regular containers.
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.Set[string], gracePeriod *int64, fldPath *field.Path, opts PodValidationOptions, podRestartPolicy *core.RestartPolicy, hostUsers bool) field.ErrorList {
allErrs := field.ErrorList{}
if len(containers) == 0 {
@ -4022,6 +4032,8 @@ type PodValidationOptions struct {
ResourceIsPod bool
// Allow relaxed validation of environment variable names
AllowRelaxedEnvironmentVariableValidation bool
// Allow the use of the ImageVolumeSource API.
AllowImageVolumeSource bool
}
// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
@ -4160,11 +4172,10 @@ func validateHostIPs(pod *core.Pod) field.ErrorList {
func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
var gracePeriod int64
if spec.TerminationGracePeriodSeconds != nil {
// this could happen in tests
gracePeriod = *spec.TerminationGracePeriodSeconds
if spec.TerminationGracePeriodSeconds == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("terminationGracePeriodSeconds"), ""))
}
gracePeriod := spec.TerminationGracePeriodSeconds
// The default for hostUsers is true, so a spec with no SecurityContext or no HostUsers field will be true.
// If the default ever changes, this condition will need to be changed.
@ -4318,6 +4329,9 @@ func validateWindows(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
if securityContext.SupplementalGroups != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("supplementalGroups"), "cannot be set for a windows pod"))
}
if securityContext.SupplementalGroupsPolicy != nil {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("securityContext").Child("supplementalGroupsPolicy"), "cannot be set for a windows pod"))
}
}
podshelper.VisitContainersWithPath(spec, fldPath, func(c *core.Container, cFldPath *field.Path) bool {
// validate container security context
@ -4781,9 +4795,6 @@ func ValidateAppArmorProfileFormat(profile string) error {
// validateAppArmorAnnotationsAndFieldsMatchOnCreate validates that AppArmor fields and annotations are consistent.
func validateAppArmorAnnotationsAndFieldsMatchOnCreate(objectMeta metav1.ObjectMeta, podSpec *core.PodSpec, specPath *field.Path) field.ErrorList {
if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmorFields) {
return nil
}
if podSpec.OS != nil && podSpec.OS.Name == core.Windows {
// Skip consistency check for windows pods.
return nil
@ -4948,6 +4959,10 @@ func validatePodSpecSecurityContext(securityContext *core.PodSecurityContext, sp
allErrs = append(allErrs, validateSeccompProfileField(securityContext.SeccompProfile, fldPath.Child("seccompProfile"))...)
allErrs = append(allErrs, validateWindowsSecurityContextOptions(securityContext.WindowsOptions, fldPath.Child("windowsOptions"))...)
allErrs = append(allErrs, ValidateAppArmorProfileField(securityContext.AppArmorProfile, fldPath.Child("appArmorProfile"))...)
if securityContext.SupplementalGroupsPolicy != nil {
allErrs = append(allErrs, validateSupplementalGroupsPolicy(securityContext.SupplementalGroupsPolicy, fldPath.Child("supplementalGroupsPolicy"))...)
}
}
return allErrs
@ -5359,6 +5374,15 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions
allErrs = append(allErrs, newIPErrs...)
}
allErrs = append(allErrs, validateContainerStatusUsers(newPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), newPod.Spec.OS)...)
allErrs = append(allErrs, validateContainerStatusUsers(newPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), newPod.Spec.OS)...)
allErrs = append(allErrs, validateContainerStatusUsers(newPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), newPod.Spec.OS)...)
allErrs = append(allErrs, validateContainerStatusAllocatedResourcesStatus(newPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), newPod.Spec.Containers)...)
allErrs = append(allErrs, validateContainerStatusAllocatedResourcesStatus(newPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), newPod.Spec.InitContainers)...)
// ephemeral containers are not allowed to have resources allocated
allErrs = append(allErrs, validateContainerStatusNoAllocatedResourcesStatus(newPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"))...)
return allErrs
}
@ -5910,7 +5934,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path
}
// Validates the given template and ensures that it is in accordance with the desired selector and replicas.
func ValidatePodTemplateSpecForRC(template, oldTemplate *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if template == nil {
allErrs = append(allErrs, field.Required(fldPath, ""))
@ -5924,14 +5948,6 @@ func ValidatePodTemplateSpecForRC(template, oldTemplate *core.PodTemplateSpec, s
}
}
allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath, opts)...)
// get rid of apivalidation.ValidateReadOnlyPersistentDisks, stop passing oldTemplate to this function
var oldVols []core.Volume
if oldTemplate != nil {
oldVols = oldTemplate.Spec.Volumes // +k8s:verify-mutation:reason=clone
}
if replicas > 1 {
allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, oldVols, fldPath.Child("spec", "volumes"))...)
}
// RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec().
if template.Spec.RestartPolicy != core.RestartPolicyAlways {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []core.RestartPolicy{core.RestartPolicyAlways}))
@ -5949,12 +5965,7 @@ func ValidateReplicationControllerSpec(spec, oldSpec *core.ReplicationController
allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...)
allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...)
allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...)
// oldSpec is not empty, pass oldSpec.template.
var oldTemplate *core.PodTemplateSpec
if oldSpec != nil {
oldTemplate = oldSpec.Template // +k8s:verify-mutation:reason=clone
}
allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, oldTemplate, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"), opts)...)
return allErrs
}
@ -5975,33 +5986,6 @@ func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path, op
return allErrs
}
// ValidateReadOnlyPersistentDisks stick this AFTER the short-circuit checks
func ValidateReadOnlyPersistentDisks(volumes, oldVolumes []core.Volume, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if utilfeature.DefaultFeatureGate.Enabled(features.SkipReadOnlyValidationGCE) {
return field.ErrorList{}
}
isWriteablePD := func(vol *core.Volume) bool {
return vol.GCEPersistentDisk != nil && !vol.GCEPersistentDisk.ReadOnly
}
for i := range oldVolumes {
if isWriteablePD(&oldVolumes[i]) {
return field.ErrorList{}
}
}
for i := range volumes {
idxPath := fldPath.Index(i)
if isWriteablePD(&volumes[i]) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only"))
}
}
return allErrs
}
// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data
func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -6777,9 +6761,35 @@ func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.
allErrs = append(allErrs, field.Required(fldPath.Index(i), ""))
} else {
if names.Has(name) {
// All requests of that claim already referenced.
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
} else {
names.Insert(name)
key := name
if claim.Request != "" {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Request, fldPath.Index(i).Child("request"))...)
key += "/" + claim.Request
}
if names.Has(key) {
// The exact same entry was already referenced.
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), key))
} else if claim.Request == "" {
// When referencing a claim, there's an
// overlap when previously some request
// in the claim was referenced. This
// cannot be checked with a map lookup,
// so we need to iterate.
for key := range names {
index := strings.Index(key, "/")
if index < 0 {
continue
}
if key[0:index] == name {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
}
}
}
names.Insert(key)
}
if !podClaimNames.Has(name) {
// field.NotFound doesn't accept an
@ -6868,7 +6878,7 @@ func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQ
case core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeCrossNamespacePodAffinity:
if req.Operator != core.ScopeSelectorOpExists {
allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator,
"must be 'Exist' when scope is any of ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeBestEffort, ResourceQuotaScopeNotBestEffort or ResourceQuotaScopeCrossNamespacePodAffinity"))
"must be 'Exists' when scope is any of ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeBestEffort, ResourceQuotaScopeNotBestEffort or ResourceQuotaScopeCrossNamespacePodAffinity"))
}
}
@ -8161,3 +8171,144 @@ func validateNodeSelectorTermHasOnlyAdditions(newTerm, oldTerm core.NodeSelector
}
return true
}
var validSupplementalGroupsPolicies = sets.New(core.SupplementalGroupsPolicyMerge, core.SupplementalGroupsPolicyStrict)
func validateSupplementalGroupsPolicy(supplementalGroupsPolicy *core.SupplementalGroupsPolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if !validSupplementalGroupsPolicies.Has(*supplementalGroupsPolicy) {
allErrors = append(allErrors, field.NotSupported(fldPath, supplementalGroupsPolicy, sets.List(validSupplementalGroupsPolicies)))
}
return allErrors
}
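A minimal sketch of a pod security context using one of the two accepted policies, via the public v1 types (assumed to mirror the internal constants validated above):

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
	)

	func main() {
		policy := v1.SupplementalGroupsPolicyStrict // the other accepted value is SupplementalGroupsPolicyMerge
		sc := v1.PodSecurityContext{
			SupplementalGroups:       []int64{1000},
			SupplementalGroupsPolicy: &policy,
		}
		fmt.Println(*sc.SupplementalGroupsPolicy)
	}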
func validateContainerStatusUsers(containerStatuses []core.ContainerStatus, fldPath *field.Path, podOS *core.PodOS) field.ErrorList {
allErrors := field.ErrorList{}
osName := core.Linux
if podOS != nil {
osName = podOS.Name
}
for i, containerStatus := range containerStatuses {
if containerStatus.User == nil {
continue
}
containerUser := containerStatus.User
switch osName {
case core.Windows:
if containerUser.Linux != nil {
allErrors = append(allErrors, field.Forbidden(fldPath.Index(i).Child("linux"), "cannot be set for a windows pod"))
}
case core.Linux:
allErrors = append(allErrors, validateLinuxContainerUser(containerUser.Linux, fldPath.Index(i).Child("linux"))...)
}
}
return allErrors
}
func validateContainerStatusNoAllocatedResourcesStatus(containerStatuses []core.ContainerStatus, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
for i, containerStatus := range containerStatuses {
if len(containerStatus.AllocatedResourcesStatus) == 0 {
continue
}
allErrors = append(allErrors, field.Forbidden(fldPath.Index(i).Child("allocatedResourcesStatus"), "must not be specified in container status"))
}
return allErrors
}
// validateContainerStatusAllocatedResourcesStatus iterates over each allocated resource's health and validates:
// - resourceName matches one of the resources in the container's resource requirements
// - resourceID is not empty and unique
func validateContainerStatusAllocatedResourcesStatus(containerStatuses []core.ContainerStatus, fldPath *field.Path, containers []core.Container) field.ErrorList {
allErrors := field.ErrorList{}
for i, containerStatus := range containerStatuses {
if containerStatus.AllocatedResourcesStatus == nil {
continue
}
allocatedResources := containerStatus.AllocatedResourcesStatus
for j, allocatedResource := range allocatedResources {
var container core.Container
containerFound := false
// get container by name
for _, c := range containers {
if c.Name == containerStatus.Name {
containerFound = true
container = c
break
}
}
// ignore missing container, see https://github.com/kubernetes/kubernetes/issues/124915
if containerFound {
found := false
// get container resources from the spec
containerResources := container.Resources
for resourceName := range containerResources.Requests {
if resourceName == allocatedResource.Name {
found = true
break
}
}
if !found {
allErrors = append(allErrors, field.Invalid(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("name"), allocatedResource.Name, "must match one of the container's resource requirements"))
}
}
uniqueResources := sets.New[core.ResourceID]()
// check resource IDs are unique
for k, r := range allocatedResource.Resources {
var supportedResourceHealthValues = sets.New(
core.ResourceHealthStatusHealthy,
core.ResourceHealthStatusUnhealthy,
core.ResourceHealthStatusUnknown)
if !supportedResourceHealthValues.Has(r.Health) {
allErrors = append(allErrors, field.NotSupported(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("resources").Index(k).Child("health"), r.Health, sets.List(supportedResourceHealthValues)))
}
if uniqueResources.Has(r.ResourceID) {
allErrors = append(allErrors, field.Duplicate(fldPath.Index(i).Child("allocatedResourcesStatus").Index(j).Child("resources").Index(k).Child("resourceID"), r.ResourceID))
} else {
uniqueResources.Insert(r.ResourceID)
}
}
}
}
return allErrors
}
func validateLinuxContainerUser(linuxContainerUser *core.LinuxContainerUser, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
if linuxContainerUser == nil {
return allErrors
}
for _, msg := range validation.IsValidUserID(linuxContainerUser.UID) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("uid"), linuxContainerUser.UID, msg))
}
for _, msg := range validation.IsValidGroupID(linuxContainerUser.GID) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("gid"), linuxContainerUser.GID, msg))
}
for g, gid := range linuxContainerUser.SupplementalGroups {
for _, msg := range validation.IsValidGroupID(gid) {
allErrors = append(allErrors, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg))
}
}
return allErrors
}
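A minimal sketch of the container user status these checks apply to, using the public v1 types (assumed to mirror the internal ContainerUser/LinuxContainerUser); the IDs are placeholders:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
	)

	func main() {
		cs := v1.ContainerStatus{
			Name: "app",
			User: &v1.ContainerUser{
				Linux: &v1.LinuxContainerUser{
					UID:                1000, // placeholder IDs
					GID:                1000,
					SupplementalGroups: []int64{1000, 2000},
				},
			},
		}
		fmt.Println(cs.User.Linux.UID)
	}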
func validateImageVolumeSource(imageVolume *core.ImageVolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
if opts.ResourceIsPod && len(imageVolume.Reference) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("reference"), ""))
}
allErrs = append(allErrs, validatePullPolicy(imageVolume.PullPolicy, fldPath.Child("pullPolicy"))...)
return allErrs
}

View File

@ -440,32 +440,6 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
*out = *in
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
if in.ResourceClaimTemplateName != nil {
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
func (in *ClaimSource) DeepCopy() *ClaimSource {
if in == nil {
return nil
}
out := new(ClaimSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
*out = *in
@ -1069,6 +1043,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.User != nil {
in, out := &in.User, &out.User
*out = new(ContainerUser)
(*in).DeepCopyInto(*out)
}
if in.AllocatedResourcesStatus != nil {
in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus
*out = make([]ResourceStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -1082,6 +1068,27 @@ func (in *ContainerStatus) DeepCopy() *ContainerStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerUser) DeepCopyInto(out *ContainerUser) {
*out = *in
if in.Linux != nil {
in, out := &in.Linux, &out.Linux
*out = new(LinuxContainerUser)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser.
func (in *ContainerUser) DeepCopy() *ContainerUser {
if in == nil {
return nil
}
out := new(ContainerUser)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
*out = *in
@ -2044,6 +2051,22 @@ func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource.
func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource {
if in == nil {
return nil
}
out := new(ImageVolumeSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
*out = *in
@ -2261,6 +2284,27 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) {
*out = *in
if in.SupplementalGroups != nil {
in, out := &in.SupplementalGroups, &out.SupplementalGroups
*out = make([]int64, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser.
func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser {
if in == nil {
return nil
}
out := new(LinuxContainerUser)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *List) DeepCopyInto(out *List) {
*out = *in
@ -2697,6 +2741,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) {
*out = *in
if in.SupplementalGroupsPolicy != nil {
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures.
func (in *NodeFeatures) DeepCopy() *NodeFeatures {
if in == nil {
return nil
}
out := new(NodeFeatures)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeList) DeepCopyInto(out *NodeList) {
*out = *in
@ -2784,6 +2849,11 @@ func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatur
*out = new(bool)
**out = **in
}
if in.UserNamespaces != nil {
in, out := &in.UserNamespaces, &out.UserNamespaces
*out = new(bool)
**out = **in
}
return
}
@ -2964,6 +3034,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Features != nil {
in, out := &in.Features, &out.Features
*out = new(NodeFeatures)
(*in).DeepCopyInto(*out)
}
return
}
@ -3973,7 +4048,16 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
*out = *in
in.Source.DeepCopyInto(&out.Source)
if in.ResourceClaimName != nil {
in, out := &in.ResourceClaimName, &out.ResourceClaimName
*out = new(string)
**out = **in
}
if in.ResourceClaimTemplateName != nil {
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
*out = new(string)
**out = **in
}
return
}
@ -4067,6 +4151,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
*out = make([]int64, len(*in))
copy(*out, *in)
}
if in.SupplementalGroupsPolicy != nil {
in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
*out = new(SupplementalGroupsPolicy)
**out = **in
}
if in.FSGroup != nil {
in, out := &in.FSGroup, &out.FSGroup
*out = new(int64)
@ -4897,6 +4986,22 @@ func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth.
func (in *ResourceHealth) DeepCopy() *ResourceHealth {
if in == nil {
return nil
}
out := new(ResourceHealth)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceList) DeepCopyInto(out *ResourceList) {
{
@ -5078,6 +5183,27 @@ func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]ResourceHealth, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
func (in *ResourceStatus) DeepCopy() *ResourceStatus {
if in == nil {
return nil
}
out := new(ResourceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
*out = *in
@ -6411,6 +6537,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
*out = new(EphemeralVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Image != nil {
in, out := &in.Image, &out.Image
*out = new(ImageVolumeSource)
**out = **in
}
return
}

View File

@ -45,6 +45,13 @@ const (
// Enable provisioning of PVCs from snapshots in other namespaces
CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource"
// owner: @aojea
// Deprecated: v1.31
//
// Allow kubelet to request a certificate without any Node IP available, only
// with DNS names.
AllowDNSOnlyNodeCSR featuregate.Feature = "AllowDNSOnlyNodeCSR"
// owner: @thockin
// deprecated: v1.29
//
@ -61,12 +68,22 @@ const (
// owner: @tallclair
// beta: v1.4
// GA: v1.31
AppArmor featuregate.Feature = "AppArmor"
// owner: @tallclair
// beta: v1.30
// GA: v1.31
AppArmorFields featuregate.Feature = "AppArmorFields"
// owner: @liggitt
// kep:
// alpha: v1.31
//
// Make the Node authorizer use fine-grained selector authorization.
// Requires AuthorizeWithSelectors to be enabled.
AuthorizeNodeWithSelectors featuregate.Feature = "AuthorizeNodeWithSelectors"
// owner: @danwinship
// alpha: v1.27
// beta: v1.29
@ -135,34 +152,22 @@ const (
// Allow the usage of options to fine-tune the cpumanager policies.
CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions"
// owner: @mfordjody
// alpha: v1.26
// owner: @jefftree
// kep: https://kep.k8s.io/4355
// alpha: v1.31
//
// Bypasses obsolete validation that GCP volumes are read-only when used in
// Deployments.
SkipReadOnlyValidationGCE featuregate.Feature = "SkipReadOnlyValidationGCE"
// Enables coordinated leader election in the API server
CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection"
// owner: @trierra
// kep: http://kep.k8s.io/2589
// alpha: v1.23
// beta: v1.25 (off by default)
// beta: v1.31 (on by default)
//
// Enables the Portworx in-tree driver to Portworx migration feature.
CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx"
// owner: @humblec
// alpha: v1.23
// deprecated: v1.28
//
// Enables the RBD in-tree driver to RBD CSI Driver migration feature.
CSIMigrationRBD featuregate.Feature = "CSIMigrationRBD"
// owner: @humblec, @zhucan
// kep: https://kep.k8s.io/3171
// alpha: v1.25
// beta: v1.27
// GA: v1.29
// Enables SecretRef field in CSI NodeExpandVolume request.
CSINodeExpandSecret featuregate.Feature = "CSINodeExpandSecret"
// owner: @fengzixu
// alpha: v1.21
//
@ -183,37 +188,32 @@ const (
// Enables container Checkpoint support in the kubelet
ContainerCheckpoint featuregate.Feature = "ContainerCheckpoint"
// owner: @bhcleek @wzshiming
// GA: v1.25
//
// Normalize HttpGet URL and Header passing for lifecycle handlers with probers.
ConsistentHTTPGetHandlers featuregate.Feature = "ConsistentHTTPGetHandlers"
// owner: @helayoty
// beta: v1.28
// Set the scheduled time as an annotation in the job.
CronJobsScheduledAnnotation featuregate.Feature = "CronJobsScheduledAnnotation"
// owner: @thockin
// deprecated: v1.28
//
// Changes when the default value of PodSpec.containers[].ports[].hostPort
// is assigned. The default is to only set a default value in Pods.
// Enabling this means a default will be assigned even to embeddedPodSpecs
// (e.g. in a Deployment), which is the historical default.
DefaultHostNetworkHostPortsInPodTemplates featuregate.Feature = "DefaultHostNetworkHostPortsInPodTemplates"
// owner: @elezar
// kep: http://kep.k8s.io/4009
// alpha: v1.28
// beta: v1.29
// GA: v1.31
//
// Add support for CDI Device IDs in the Device Plugin API.
DevicePluginCDIDevices featuregate.Feature = "DevicePluginCDIDevices"
// owner: @aojea
// alpha: v1.31
//
// The apiservers with the MultiCIDRServiceAllocator feature enabled, in order to support live migration from the old bitmap ClusterIP
// allocators to the new IPAddress allocators introduced by the MultiCIDRServiceAllocator feature, perform a dual-write on
// both allocators. This feature gate disables the dual write on the new Cluster IP allocators.
DisableAllocatorDualWrite featuregate.Feature = "DisableAllocatorDualWrite"
// owner: @andrewsykim
// alpha: v1.22
// beta: v1.29
// GA: v1.31
//
// Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag.
DisableCloudProviders featuregate.Feature = "DisableCloudProviders"
@ -225,9 +225,18 @@ const (
// Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials.
DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders"
// owner: @micahhausler
// Deprecated: v1.31
//
// Setting AllowInsecureKubeletCertificateSigningRequests to true disables node admission validation of CSRs
// for kubelet signers where CN=system:node:$nodeName.
// Remove in v1.33
AllowInsecureKubeletCertificateSigningRequests featuregate.Feature = "AllowInsecureKubeletCertificateSigningRequests"
// owner: @HirazawaUi
// kep: http://kep.k8s.io/4004
// alpha: v1.29
// beta: v1.31
// DisableNodeKubeProxyVersion disables the status.nodeInfo.kubeProxyVersion field of v1.Node
DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion"
@ -236,7 +245,17 @@ const (
// alpha: v1.26
//
// Enables support for resources with custom parameters and a lifecycle
// that is independent of a Pod.
// that is independent of a Pod. Resource allocation is done by a DRA driver's
// "control plane controller" in cooperation with the scheduler.
DRAControlPlaneController featuregate.Feature = "DRAControlPlaneController"
// owner: @pohly
// kep: http://kep.k8s.io/4381
// alpha: v1.29
//
// Enables support for resources with custom parameters and a lifecycle
// that is independent of a Pod. Resource allocation is done by the scheduler
// based on "structured parameters".
DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"
// owner: @harche
@ -290,62 +309,20 @@ const (
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"
// owner: @deepakkinni @xing-yang
// kep: https://kep.k8s.io/2680
// kep: https://kep.k8s.io/2644
// alpha: v1.23
// beta: v1.31
//
// Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC
// deletion ordering.
HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
// owner: @leakingtapan
// alpha: v1.21
//
// Disables the AWS EBS in-tree driver.
InTreePluginAWSUnregister featuregate.Feature = "InTreePluginAWSUnregister"
// owner: @andyzhangx
// alpha: v1.21
//
// Disables the Azure Disk in-tree driver.
InTreePluginAzureDiskUnregister featuregate.Feature = "InTreePluginAzureDiskUnregister"
// owner: @andyzhangx
// alpha: v1.21
//
// Disables the Azure File in-tree driver.
InTreePluginAzureFileUnregister featuregate.Feature = "InTreePluginAzureFileUnregister"
// owner: @Jiawei0227
// alpha: v1.21
//
// Disables the GCE PD in-tree driver.
InTreePluginGCEUnregister featuregate.Feature = "InTreePluginGCEUnregister"
// owner: @adisky
// alpha: v1.21
//
// Disables the OpenStack Cinder in-tree driver.
InTreePluginOpenStackUnregister featuregate.Feature = "InTreePluginOpenStackUnregister"
// owner: @trierra
// alpha: v1.23
//
// Disables the Portworx in-tree driver.
InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister"
// owner: @humblec
// alpha: v1.23
// deprecated: v1.28
//
// Disables the RBD in-tree driver.
InTreePluginRBDUnregister featuregate.Feature = "InTreePluginRBDUnregister"
// owner: @divyenpatel
// alpha: v1.21
//
// Disables the vSphere in-tree driver.
InTreePluginvSphereUnregister featuregate.Feature = "InTreePluginvSphereUnregister"
// owner: @mimowo
// kep: https://kep.k8s.io/3850
// alpha: v1.28
@ -365,6 +342,7 @@ const (
// kep: https://kep.k8s.io/3329
// alpha: v1.25
// beta: v1.26
// stable: v1.31
//
// Allow users to specify handling of pod failures based on container exit codes
// and pod conditions.
@ -382,21 +360,16 @@ const (
// owner: @tenzen-y
// kep: https://kep.k8s.io/3998
// alpha: v1.30
// beta: v1.31
//
// Allow users to specify when a Job can be declared as succeeded
// based on the set of succeeded pods.
JobSuccessPolicy featuregate.Feature = "JobSuccessPolicy"
// owner: @alculquicondor
// alpha: v1.23
// beta: v1.24
//
// Track the number of pods with Ready condition in the Job status.
JobReadyPods featuregate.Feature = "JobReadyPods"
// owner: @marquiz
// kep: http://kep.k8s.io/4033
// alpha: v1.28
// beta: v1.31
//
// Enable detection of the kubelet cgroup driver configuration option from
// the CRI. The CRI runtime also needs to support this feature in which
@ -425,10 +398,14 @@ const (
// Enable POD resources API with Get method
KubeletPodResourcesGet featuregate.Feature = "KubeletPodResourcesGet"
// KubeletSeparateDiskGC enables Kubelet to garbage collect images/containers on different filesystems
// owner: @kannon92
// kep: https://kep.k8s.io/4191
// alpha: v1.29
// beta: v1.31
//
// The split image filesystem feature enables kubelet to perform garbage collection
// of images (read-only layers) and/or containers (writeable layers) deployed on
// separate filesystems.
KubeletSeparateDiskGC featuregate.Feature = "KubeletSeparateDiskGC"
// owner: @sallyom
@ -443,6 +420,7 @@ const (
// kep: http://kep.k8s.io/3836
// alpha: v1.28
// beta: v1.30
// stable: v1.31
//
// Implement connection draining for terminating nodes for
// `externalTrafficPolicy: Cluster` services.
@ -459,14 +437,17 @@ const (
// owner: @RobertKrawitz
// alpha: v1.15
// beta: v1.31
//
// Allow use of filesystems for ephemeral storage monitoring.
// Only applies if LocalStorageCapacityIsolation is set.
// Relies on UserNamespacesSupport feature, and thus should follow it when setting defaults.
LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
// owner: @damemi
// alpha: v1.21
// beta: v1.22
// GA: v1.31
//
// Enables scaling down replicas via logarithmic comparison of creation/ready timestamps
LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown"
@ -474,6 +455,7 @@ const (
// owner: @sanposhiho
// kep: https://kep.k8s.io/3633
// alpha: v1.29
// beta: v1.30
//
// Enables the MatchLabelKeys and MismatchLabelKeys in PodAffinity and PodAntiAffinity.
MatchLabelKeysInPodAffinity featuregate.Feature = "MatchLabelKeysInPodAffinity"
@ -517,6 +499,7 @@ const (
// owner: @aojea
// kep: https://kep.k8s.io/1880
// alpha: v1.27
// beta: v1.31
//
// Enables the dynamic configuration of Service IP ranges
MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator"
@ -532,6 +515,7 @@ const (
// owner: @danwinship
// kep: https://kep.k8s.io/3866
// alpha: v1.29
// beta: v1.31
//
// Allows running kube-proxy with `--mode nftables`.
NFTablesProxyMode featuregate.Feature = "NFTablesProxyMode"
@ -566,6 +550,7 @@ const (
// kep: http://kep.k8s.io/3018
// alpha: v1.26
// beta: v1.27
// GA: v1.31
//
// Enables PDBUnhealthyPodEvictionPolicy for PodDisruptionBudgets
PDBUnhealthyPodEvictionPolicy featuregate.Feature = "PDBUnhealthyPodEvictionPolicy"
@ -574,6 +559,7 @@ const (
// kep: https://kep.k8s.io/3762
// alpha: v1.28
// beta: v1.29
// GA: v1.31
//
// Adds a new field to persistent volumes which holds a timestamp of when the volume last transitioned its phase.
PersistentVolumeLastPhaseTransitionTime featuregate.Feature = "PersistentVolumeLastPhaseTransitionTime"
@ -597,6 +583,7 @@ const (
// kep: https://kep.k8s.io/3329
// alpha: v1.25
// beta: v1.26
// stable: v1.31
//
// Enables support for appending a dedicated pod condition indicating that
// the pod is being deleted due to a disruption.
@ -646,12 +633,14 @@ const (
// owner: @seans3
// kep: http://kep.k8s.io/4006
// alpha: v1.30
// beta: v1.31
//
// Enables PortForward to be proxied with a websocket client
PortForwardWebsockets featuregate.Feature = "PortForwardWebsockets"
// owner: @jessfraz
// alpha: v1.12
// beta: v1.31
//
// Enables control over ProcMountType for containers.
ProcMountType featuregate.Feature = "ProcMountType"
@ -663,15 +652,6 @@ const (
// bursting into resources requested at higher QoS levels (memory only for now)
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @chrishenzie
// kep: https://kep.k8s.io/2485
// alpha: v1.22
// beta: v1.27
// GA: v1.29
//
// Enables usage of the ReadWriteOncePod PersistentVolume access mode.
ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod"
// owner: @gnufied
// kep: https://kep.k8s.io/1790
// alpha: v1.23
@ -686,6 +666,21 @@ const (
// Allow almost all printable ASCII characters in environment variables
RelaxedEnvironmentVariableValidation featuregate.Feature = "RelaxedEnvironmentVariableValidation"
// owner: @zhangweikop
// beta: v1.31
//
// Enable kubelet tls server to update certificate if the specified certificate files are changed.
// This feature is useful when specifying tlsCertFile & tlsPrivateKeyFile in kubelet Configuration.
// No effect for other cases such as using serverTLSBootstrap.
ReloadKubeletServerCertificateFile featuregate.Feature = "ReloadKubeletServerCertificateFile"
// owner: @SergeyKanzhelev
// kep: https://kep.k8s.io/4680
// alpha: v1.31
//
// Adds the AllocatedResourcesStatus to the container status.
ResourceHealthStatus featuregate.Feature = "ResourceHealthStatus"
// owner: @mikedanese
// alpha: v1.7
// beta: v1.12
@ -738,6 +733,7 @@ const (
// owner: @munnerz
// kep: http://kep.k8s.io/4193
// alpha: v1.29
// beta: v1.31
//
// Controls whether the apiserver supports binding service account tokens to Node objects.
ServiceAccountTokenNodeBinding featuregate.Feature = "ServiceAccountTokenNodeBinding"
@ -759,18 +755,10 @@ const (
// service account tokens bound to Pod objects.
ServiceAccountTokenPodNodeInfo featuregate.Feature = "ServiceAccountTokenPodNodeInfo"
// owner: @xuzhenglun
// kep: http://kep.k8s.io/3682
// alpha: v1.27
// beta: v1.28
// stable: v1.29
//
// Subdivide the NodePort range for dynamic and static port allocation.
ServiceNodePortStaticSubrange featuregate.Feature = "ServiceNodePortStaticSubrange"
// owner: @gauravkghildiyal @robscott
// kep: https://kep.k8s.io/4444
// alpha: v1.30
// beta: v1.31
//
// Enables trafficDistribution field on Services.
ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution"
@ -811,6 +799,7 @@ const (
// owner: @psch
// alpha: v1.26
// beta: v1.27
// stable: v1.31
//
// Enables a StatefulSet to start from an arbitrary non-zero ordinal
StatefulSetStartOrdinal featuregate.Feature = "StatefulSetStartOrdinal"
@ -860,6 +849,7 @@ const (
// owner: @seans3
// kep: http://kep.k8s.io/4006
// alpha: v1.29
// beta: v1.30
//
// Enables StreamTranslator proxy to handle WebSockets upgrade requests for the
@ -883,6 +873,7 @@ const (
// owner: @mattcarry, @sunnylovestiramisu
// kep: https://kep.k8s.io/3751
// alpha: v1.29
// beta: v1.31 (off by default)
//
// Enables user specified volume attributes for persistent volumes, like iops and throughput.
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"
@ -979,13 +970,29 @@ const (
// owner: @AkihiroSuda
// kep: https://kep.k8s.io/3857
// alpha: v1.30
// beta: v1.31
//
// Allows recursive read-only mounts.
RecursiveReadOnlyMounts featuregate.Feature = "RecursiveReadOnlyMounts"
// owner: @everpeace
// kep: https://kep.k8s.io/3619
// alpha: v1.31
//
// Enable SupplementalGroupsPolicy feature in PodSecurityContext
SupplementalGroupsPolicy featuregate.Feature = "SupplementalGroupsPolicy"
// owner: @saschagrunert
// kep: https://kep.k8s.io/4639
// alpha: v1.31
//
// Enables the image volume source.
ImageVolume featuregate.Feature = "ImageVolume"
)
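For readers of this rebase, an illustrative sketch of how a consumer typically gates a code path on one of the constants declared above; it is not part of this change and assumes the usual imports (utilfeature "k8s.io/apiserver/pkg/util/feature" and "k8s.io/kubernetes/pkg/features"), with ImageVolume used purely as an example gate.

// Sketch only: checking a feature gate from consuming code.
func imageVolumeEnabled() bool {
	// DefaultFeatureGate is the read-only view of the mutable gate that the
	// init() below populates.
	return utilfeature.DefaultFeatureGate.Enabled(features.ImageVolume)
}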
func init() {
runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
// Register all client-go features with kube's feature gate instance and make all client-go
// feature checks use kube's instance. The effect is that for kube binaries, client-go
@ -1006,13 +1013,17 @@ func init() {
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
CrossNamespaceVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},
AllowDNSOnlyNodeCSR: {Default: false, PreRelease: featuregate.Deprecated}, // remove after 1.33
AllowServiceLBStatusOnNonLB: {Default: false, PreRelease: featuregate.Deprecated}, // remove after 1.29
AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24
AppArmor: {Default: true, PreRelease: featuregate.Beta},
AppArmor: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
AppArmorFields: {Default: true, PreRelease: featuregate.Beta},
AppArmorFields: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
AuthorizeNodeWithSelectors: {Default: false, PreRelease: featuregate.Alpha},
CloudDualStackNodeIPs: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
@ -1030,33 +1041,27 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationPortworx: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Portworx CSI driver)
CSIMigrationRBD: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31
CSINodeExpandSecret: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
CSIMigrationPortworx: {Default: true, PreRelease: featuregate.Beta}, // On by default (requires Portworx CSI driver)
CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},
SkipReadOnlyValidationGCE: {Default: true, PreRelease: featuregate.Deprecated}, // remove in 1.31
CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha},
ContainerCheckpoint: {Default: true, PreRelease: featuregate.Beta},
ConsistentHTTPGetHandlers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
CronJobsScheduledAnnotation: {Default: true, PreRelease: featuregate.Beta},
DefaultHostNetworkHostPortsInPodTemplates: {Default: false, PreRelease: featuregate.Deprecated},
DisableAllocatorDualWrite: {Default: false, PreRelease: featuregate.Alpha}, // remove after MultiCIDRServiceAllocator is GA
DisableCloudProviders: {Default: true, PreRelease: featuregate.Beta},
DisableCloudProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
DisableKubeletCloudCredentialProviders: {Default: true, PreRelease: featuregate.Beta},
DisableKubeletCloudCredentialProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
DisableNodeKubeProxyVersion: {Default: false, PreRelease: featuregate.Alpha},
DisableNodeKubeProxyVersion: {Default: true, PreRelease: featuregate.Beta},
DevicePluginCDIDevices: {Default: true, PreRelease: featuregate.Beta},
DevicePluginCDIDevices: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
DRAControlPlaneController: {Default: false, PreRelease: featuregate.Alpha},
DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha},
@ -1064,7 +1069,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
RetryGenerateName: {Default: false, PreRelease: featuregate.Alpha},
RetryGenerateName: {Default: true, PreRelease: featuregate.Beta},
GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta},
@ -1072,39 +1077,23 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
HPAContainerMetrics: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
HonorPVReclaimPolicy: {Default: false, PreRelease: featuregate.Alpha},
HonorPVReclaimPolicy: {Default: true, PreRelease: featuregate.Beta},
ImageMaximumGCAge: {Default: true, PreRelease: featuregate.Beta},
InTreePluginAWSUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginAzureDiskUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginAzureFileUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginGCEUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginOpenStackUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginPortworxUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginRBDUnregister: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31
InTreePluginvSphereUnregister: {Default: false, PreRelease: featuregate.Alpha},
JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta},
JobManagedBy: {Default: false, PreRelease: featuregate.Alpha},
JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
JobPodFailurePolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta},
JobSuccessPolicy: {Default: false, PreRelease: featuregate.Alpha},
JobSuccessPolicy: {Default: true, PreRelease: featuregate.Beta},
JobReadyPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
KubeletCgroupDriverFromCRI: {Default: false, PreRelease: featuregate.Alpha},
KubeletCgroupDriverFromCRI: {Default: true, PreRelease: featuregate.Beta},
KubeletInUserNamespace: {Default: false, PreRelease: featuregate.Alpha},
@ -1112,19 +1101,19 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
KubeletPodResourcesGet: {Default: false, PreRelease: featuregate.Alpha},
KubeletSeparateDiskGC: {Default: false, PreRelease: featuregate.Alpha},
KubeletSeparateDiskGC: {Default: true, PreRelease: featuregate.Beta},
KubeletTracing: {Default: true, PreRelease: featuregate.Beta},
KubeProxyDrainingTerminatingNodes: {Default: true, PreRelease: featuregate.Beta},
KubeProxyDrainingTerminatingNodes: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31; remove in 1.33
LegacyServiceAccountTokenCleanUp: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Beta},
LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta},
LogarithmicScaleDown: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
MatchLabelKeysInPodAffinity: {Default: false, PreRelease: featuregate.Alpha},
MatchLabelKeysInPodAffinity: {Default: true, PreRelease: featuregate.Beta},
MatchLabelKeysInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
@ -1136,11 +1125,11 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha},
MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Beta},
NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
NFTablesProxyMode: {Default: false, PreRelease: featuregate.Alpha},
NFTablesProxyMode: {Default: true, PreRelease: featuregate.Beta},
NodeLogQuery: {Default: false, PreRelease: featuregate.Beta},
@ -1148,15 +1137,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
NodeSwap: {Default: true, PreRelease: featuregate.Beta},
PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.Beta},
PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
PersistentVolumeLastPhaseTransitionTime: {Default: true, PreRelease: featuregate.Beta},
PersistentVolumeLastPhaseTransitionTime: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha},
PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta},
@ -1166,23 +1155,25 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
PodSchedulingReadiness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30; remove in 1.32
PortForwardWebsockets: {Default: false, PreRelease: featuregate.Alpha},
PortForwardWebsockets: {Default: true, PreRelease: featuregate.Beta},
ProcMountType: {Default: false, PreRelease: featuregate.Alpha},
ProcMountType: {Default: false, PreRelease: featuregate.Beta},
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ReadWriteOncePod: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},
RelaxedEnvironmentVariableValidation: {Default: false, PreRelease: featuregate.Alpha},
ReloadKubeletServerCertificateFile: {Default: true, PreRelease: featuregate.Beta},
ResourceHealthStatus: {Default: false, PreRelease: featuregate.Alpha},
RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
RuntimeClassInImageCriAPI: {Default: false, PreRelease: featuregate.Alpha},
ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta},
ElasticIndexedJob: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.32
SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta},
@ -1192,23 +1183,21 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
ServiceAccountTokenPodNodeInfo: {Default: true, PreRelease: featuregate.Beta},
ServiceAccountTokenNodeBinding: {Default: false, PreRelease: featuregate.Alpha},
ServiceAccountTokenNodeBinding: {Default: true, PreRelease: featuregate.Beta},
ServiceAccountTokenNodeBindingValidation: {Default: true, PreRelease: featuregate.Beta},
ServiceNodePortStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.29; remove in 1.31
ServiceTrafficDistribution: {Default: false, PreRelease: featuregate.Alpha},
ServiceTrafficDistribution: {Default: true, PreRelease: featuregate.Beta},
SidecarContainers: {Default: true, PreRelease: featuregate.Beta},
SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta},
StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30, remove in 1.31
StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.30, remove in 1.32
StatefulSetAutoDeletePVC: {Default: true, PreRelease: featuregate.Beta},
StatefulSetStartOrdinal: {Default: true, PreRelease: featuregate.Beta},
StatefulSetStartOrdinal: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33
StorageVersionMigrator: {Default: false, PreRelease: featuregate.Alpha},
@ -1224,7 +1213,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
UnknownVersionInteroperabilityProxy: {Default: false, PreRelease: featuregate.Alpha},
VolumeAttributesClass: {Default: false, PreRelease: featuregate.Alpha},
VolumeAttributesClass: {Default: false, PreRelease: featuregate.Beta},
VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha},
@ -1250,6 +1239,10 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
SELinuxMount: {Default: false, PreRelease: featuregate.Alpha},
SupplementalGroupsPolicy: {Default: false, PreRelease: featuregate.Alpha},
ImageVolume: {Default: false, PreRelease: featuregate.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
@ -1257,9 +1250,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
genericfeatures.AnonymousAuthConfigurableEndpoints: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32
genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
@ -1267,11 +1260,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.APIServerTracing: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIServingWithRoutine: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIServingWithRoutine: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.ConsistentListFromCache: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.AuthorizeWithSelectors: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
genericfeatures.ConcurrentWatchObjectDecode: {Default: false, PreRelease: featuregate.Beta},
genericfeatures.ConsistentListFromCache: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.CoordinatedLeaderElection: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
@ -1287,6 +1284,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
genericfeatures.ResilientWatchCacheInitialization: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
@ -1311,6 +1310,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
genericfeatures.WatchCacheInitializationPostStartHook: {Default: false, PreRelease: featuregate.Beta},
genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta},
genericfeatures.WatchList: {Default: false, PreRelease: featuregate.Alpha},
@ -1322,13 +1323,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
apiextensionsfeatures.CRDValidationRatcheting: {Default: true, PreRelease: featuregate.Beta},
apiextensionsfeatures.CustomResourceFieldSelectors: {Default: false, PreRelease: featuregate.Alpha},
apiextensionsfeatures.CustomResourceFieldSelectors: {Default: true, PreRelease: featuregate.Beta},
// features that enable backwards compatibility but are scheduled to be removed
// ...
HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
AllowInsecureKubeletCertificateSigningRequests: {Default: false, PreRelease: featuregate.Deprecated}, // remove in 1.33
StorageNamespaceIndex: {Default: true, PreRelease: featuregate.Beta},
RecursiveReadOnlyMounts: {Default: false, PreRelease: featuregate.Alpha},
RecursiveReadOnlyMounts: {Default: true, PreRelease: featuregate.Beta},
}

View File

@ -0,0 +1,34 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package features
import (
"k8s.io/component-base/featuregate"
)
// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
// To add a new feature, define a key for it and add it here. The features will be
// available throughout Kubernetes binaries.
//
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
// Example:
// genericfeatures.EmulationVersion: {
// {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
// },
}
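To make the VersionedSpecs shape concrete, a purely hypothetical entry (no such gate is added in this change) would look roughly like the following, assuming k8s.io/apimachinery/pkg/util/version for version parsing:

// Hypothetical example entry, for illustration only:
//
//	SomeNewFeature: {
//		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
//		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
//	},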

View File

@ -27,6 +27,7 @@ type PodSecurityContextAccessor interface {
HostNetwork() bool
HostPID() bool
HostIPC() bool
HostUsers() *bool
SELinuxOptions() *api.SELinuxOptions
RunAsUser() *int64
RunAsGroup() *int64
@ -43,6 +44,7 @@ type PodSecurityContextMutator interface {
SetHostNetwork(bool)
SetHostPID(bool)
SetHostIPC(bool)
SetHostUsers(*bool)
SetSELinuxOptions(*api.SELinuxOptions)
SetRunAsUser(*int64)
SetRunAsGroup(*int64)
@ -120,6 +122,19 @@ func (w *podSecurityContextWrapper) SetHostIPC(v bool) {
w.ensurePodSC()
w.podSC.HostIPC = v
}
func (w *podSecurityContextWrapper) HostUsers() *bool {
if w.podSC == nil {
return nil
}
return w.podSC.HostUsers
}
func (w *podSecurityContextWrapper) SetHostUsers(v *bool) {
if w.podSC == nil && v == nil {
return
}
w.ensurePodSC()
w.podSC.HostUsers = v
}
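A minimal usage sketch for the new HostUsers accessor/mutator pair; it assumes this package's existing NewPodSecurityContextMutator constructor and that the mutator embeds the accessor interface, and is illustrative only.

// Sketch only: setting and reading HostUsers through the wrappers.
func exampleHostUsers(podSC *api.PodSecurityContext) *bool {
	hostUsers := false
	m := NewPodSecurityContextMutator(podSC)
	m.SetHostUsers(&hostUsers) // allocates a PodSecurityContext lazily if podSC was nil
	return m.HostUsers()       // reads back the value just set
}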
func (w *podSecurityContextWrapper) SELinuxOptions() *api.SELinuxOptions {
if w.podSC == nil {
return nil

View File

@ -37,12 +37,12 @@ func IsUnixDomainSocket(filePath string) (bool, error) {
return true, nil
}
// Chmod is the same as os.Chmod on Linux.
// Chmod is the same as os.Chmod on Unix.
func Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
// MkdirAll is the same as os.MkdirAll on Linux.
// MkdirAll is the same as os.MkdirAll on Unix.
func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}

View File

@ -52,7 +52,7 @@ func (n *noopExpandableVolumePluginInstance) RequiresRemount(spec *Spec) bool {
return false
}
func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error) {
func (n *noopExpandableVolumePluginInstance) NewMounter(spec *Spec, podRef *v1.Pod) (Mounter, error) {
return nil, nil
}
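VolumeOptions has been dropped from NewMounter (and from the related constructors further down); a hedged, caller-side sketch of the new shape, where newMounterFor is a hypothetical helper and the lookup matches the FindPluginBySpec method used later in this file:

// Sketch only: resolving a plugin and building a Mounter with the two-argument signature.
func newMounterFor(pm *VolumePluginMgr, spec *Spec, pod *v1.Pod) (Mounter, error) {
	plugin, err := pm.FindPluginBySpec(spec)
	if err != nil {
		return nil, err
	}
	// VolumeOptions is no longer part of the call.
	return plugin.NewMounter(spec, pod)
}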
@ -68,10 +68,6 @@ func (n *noopExpandableVolumePluginInstance) SupportsMountOption() bool {
return true
}
func (n *noopExpandableVolumePluginInstance) SupportsBulkVolumeVerification() bool {
return false
}
func (n *noopExpandableVolumePluginInstance) RequiresFSResize() bool {
return true
}

View File

@ -39,7 +39,6 @@ import (
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
"k8s.io/kubernetes/pkg/volume/util/subpath"
@ -83,10 +82,6 @@ type VolumeOptions struct {
// i.e. with required capacity, accessMode, labels matching PVC.Selector and
// so on.
PVC *v1.PersistentVolumeClaim
// Unique name of Kubernetes cluster.
ClusterName string
// Tags to attach to the real volume in the cloud provider - e.g. AWS EBS
CloudTags *map[string]string
// Volume provisioning parameters from StorageClass
Parameters map[string]string
}
@ -154,7 +149,7 @@ type VolumePlugin interface {
// Ownership of the spec pointer is *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
NewMounter(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (Mounter, error)
NewMounter(spec *Spec, podRef *v1.Pod) (Mounter, error)
// NewUnmounter creates a new volume.Unmounter from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
@ -172,11 +167,6 @@ type VolumePlugin interface {
// user specified mount options will result in error creating persistent volumes
SupportsMountOption() bool
// SupportsBulkVolumeVerification checks if volume plugin type is capable
// of enabling bulk polling of all nodes. This can speed up verification of
// attached volumes by quite a bit, but the underlying plugin must support it.
SupportsBulkVolumeVerification() bool
// SupportsSELinuxContextMount returns true if volume plugins supports
// mount -o context=XYZ for a given volume.
SupportsSELinuxContextMount(spec *Spec) (bool, error)
@ -263,32 +253,6 @@ type NodeExpandableVolumePlugin interface {
NodeExpand(resizeOptions NodeResizeOptions) (bool, error)
}
// VolumePluginWithAttachLimits is an extended interface of VolumePlugin that restricts number of
// volumes that can be attached to a node.
type VolumePluginWithAttachLimits interface {
VolumePlugin
// Return maximum number of volumes that can be attached to a node for this plugin.
// The key must be same as string returned by VolumeLimitKey function. The returned
// map may look like:
// - { "storage-limits-aws-ebs": 39 }
// - { "storage-limits-gce-pd": 10 }
// A volume plugin may return error from this function - if it can not be used on a given node or not
// applicable in given environment (where environment could be cloudprovider or any other dependency)
// For example - calling this function for EBS volume plugin on a GCE node should
// result in error.
// The returned values are stored in node allocatable property and will be used
// by scheduler to determine how many pods with volumes can be scheduled on given node.
GetVolumeLimits() (map[string]int64, error)
// Return volume limit key string to be used in node capacity constraints
// The key must start with prefix storage-limits-. For example:
// - storage-limits-aws-ebs
// - storage-limits-csi-cinder
// The key should respect character limit of ResourceName type
// This function may be called by kubelet or scheduler to identify node allocatable property
// which stores volumes limits.
VolumeLimitKey(spec *Spec) string
}
// BlockVolumePlugin is an extended interface of VolumePlugin and is used for block volume support.
type BlockVolumePlugin interface {
VolumePlugin
@ -296,7 +260,7 @@ type BlockVolumePlugin interface {
// Ownership of the spec pointer is *not* transferred.
// - spec: The v1.Volume spec
// - pod: The enclosing pod
NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error)
NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod) (BlockVolumeMapper, error)
// NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state.
// - name: The volume name, as per the v1.Volume spec.
// - podUID: The UID of the enclosing pod
@ -401,16 +365,13 @@ type VolumeHost interface {
// the provided spec. This is used to implement volume plugins which
// "wrap" other plugins. For example, the "secret" volume is
// implemented in terms of the "emptyDir" volume.
NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error)
NewWrapperMounter(volName string, spec Spec, pod *v1.Pod) (Mounter, error)
// NewWrapperUnmounter finds an appropriate plugin with which to handle
// the provided spec. See comments on NewWrapperMounter for more
// context.
NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error)
// Get cloud provider from kubelet.
GetCloudProvider() cloudprovider.Interface
// Get mounter interface.
GetMounter(pluginName string) mount.Interface
@ -457,7 +418,7 @@ type VolumePluginMgr struct {
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
loggedDeprecationWarnings sets.String
loggedDeprecationWarnings sets.Set[string]
Host VolumeHost
}
@ -599,7 +560,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu
defer pm.mutex.Unlock()
pm.Host = host
pm.loggedDeprecationWarnings = sets.NewString()
pm.loggedDeprecationWarnings = sets.New[string]()
if prober == nil {
// Use a dummy prober to prevent a nil dereference.
@ -751,20 +712,6 @@ func (pm *VolumePluginMgr) refreshProbedPlugins() {
}
}
// ListVolumePluginWithLimits returns plugins that have volume limits on nodes
func (pm *VolumePluginMgr) ListVolumePluginWithLimits() []VolumePluginWithAttachLimits {
pm.mutex.RLock()
defer pm.mutex.RUnlock()
matchedPlugins := []VolumePluginWithAttachLimits{}
for _, v := range pm.plugins {
if plugin, ok := v.(VolumePluginWithAttachLimits); ok {
matchedPlugins = append(matchedPlugins, plugin)
}
}
return matchedPlugins
}
// FindPersistentPluginBySpec looks for a persistent volume plugin that can
// support a given volume specification. If no plugin is found, return an
// error
@ -779,20 +726,6 @@ func (pm *VolumePluginMgr) FindPersistentPluginBySpec(spec *Spec) (PersistentVol
return nil, fmt.Errorf("no persistent volume plugin matched")
}
// FindVolumePluginWithLimitsBySpec returns volume plugin that has a limit on how many
// of them can be attached to a node
func (pm *VolumePluginMgr) FindVolumePluginWithLimitsBySpec(spec *Spec) (VolumePluginWithAttachLimits, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, fmt.Errorf("could not find volume plugin for spec : %#v", spec)
}
if limitedPlugin, ok := volumePlugin.(VolumePluginWithAttachLimits); ok {
return limitedPlugin, nil
}
return nil, fmt.Errorf("no plugin with limits found")
}
// FindPersistentPluginByName fetches a persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindPersistentPluginByName(name string) (PersistentVolumePlugin, error) {
@ -858,19 +791,6 @@ func (pm *VolumePluginMgr) FindDeletablePluginByName(name string) (DeletableVolu
return nil, fmt.Errorf("no deletable volume plugin matched")
}
// FindCreatablePluginBySpec fetches a persistent volume plugin by name. If
// no plugin is found, returns error.
func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableVolumePlugin, error) {
volumePlugin, err := pm.FindPluginBySpec(spec)
if err != nil {
return nil, err
}
if provisionableVolumePlugin, ok := volumePlugin.(ProvisionableVolumePlugin); ok {
return provisionableVolumePlugin, nil
}
return nil, fmt.Errorf("no creatable volume plugin matched")
}
// FindAttachablePluginBySpec fetches a persistent volume plugin by spec.
// Unlike the other "FindPlugin" methods, this does not return error if no
// plugin is found. All volumes require a mounter and unmounter, but not

View File

@ -158,7 +158,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection, setPerms func(su
}
oldTsPath := filepath.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.String
var pathsToRemove sets.Set[string]
shouldWrite := true
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
@ -355,10 +355,10 @@ func shouldWriteFile(path string, content []byte) (bool, error) {
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
paths := sets.NewString()
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTSDir string) (sets.Set[string], error) {
paths := sets.New[string]()
visitor := func(path string, info os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, oldTsDir)
relativePath := strings.TrimPrefix(path, oldTSDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if relativePath == "" {
return nil
@ -368,15 +368,15 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
return nil
}
err := filepath.Walk(oldTsDir, visitor)
err := filepath.Walk(oldTSDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
return nil, err
}
klog.V(5).Infof("%s: current paths: %+v", w.targetDir, paths.List())
klog.V(5).Infof("%s: current paths: %+v", w.targetDir, sets.List(paths))
newPaths := sets.NewString()
newPaths := sets.New[string]()
for file := range payload {
// add all subpaths for the payload to the set of new paths
// to avoid attempting to remove non-empty dirs
@ -386,7 +386,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir
subPath = strings.TrimSuffix(subPath, string(os.PathSeparator))
}
}
klog.V(5).Infof("%s: new paths: %+v", w.targetDir, newPaths.List())
klog.V(5).Infof("%s: new paths: %+v", w.targetDir, sets.List(newPaths))
result := paths.Difference(newPaths)
klog.V(5).Infof("%s: paths to remove: %+v", w.targetDir, result)
@ -444,7 +444,8 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
if fileProjection.FsUser == nil {
continue
}
if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
if err := w.chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err)
return err
}
@ -487,7 +488,7 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection)
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.Set[string]) error {
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {

View File

@ -0,0 +1,27 @@
//go:build linux
// +build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import "os"
// chown changes the numeric uid and gid of the named file.
func (w *AtomicWriter) chown(name string, uid, gid int) error {
return os.Chown(name, uid, gid)
}

View File

@ -0,0 +1,33 @@
//go:build !linux
// +build !linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"runtime"
"k8s.io/klog/v2"
)
// chown changes the numeric uid and gid of the named file.
// This is a no-op on unsupported platforms.
func (w *AtomicWriter) chown(name string, uid, _ /* gid */ int) error {
klog.Warningf("%s: skipping change of Linux owner %v for file %s; unsupported on %s", w.logContext, uid, name, runtime.GOOS)
return nil
}

View File

@ -225,11 +225,11 @@ func GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
return getApplier(path).GetQuotaOnDir(path)
}
func clearQuotaOnDir(m mount.Interface, path string) error {
func clearQuotaOnDir(m mount.Interface, path string, userNamespacesEnabled bool) error {
// Since we may be called without path being in the map,
// we explicitly have to check in this case.
klog.V(4).Infof("clearQuotaOnDir %s", path)
supportsQuotas, err := SupportsQuotas(m, path)
supportsQuotas, err := SupportsQuotas(m, path, userNamespacesEnabled)
if err != nil {
// Log-and-continue instead of returning an error for now
// due to unspecified backwards compatibility concerns (a subject to revise)
@ -269,11 +269,17 @@ func clearQuotaOnDir(m mount.Interface, path string) error {
// don't cache the result because nothing will clean it up.
// However, do cache the device->applier map; the number of devices
// is bounded.
func SupportsQuotas(m mount.Interface, path string) (bool, error) {
// User namespaces prevent changes to project IDs on the filesystem,
// ensuring xfs-quota metrics' reliability; hence, userNamespacesEnabled is checked.
func SupportsQuotas(m mount.Interface, path string, userNamespacesEnabled bool) (bool, error) {
if !enabledQuotasForMonitoring() {
klog.V(3).Info("SupportsQuotas called, but quotas disabled")
return false, nil
}
if !userNamespacesEnabled {
klog.V(3).Info("SupportQuotas called and LocalStorageCapacityIsolationFSQuotaMonitoring enabled, but pod is not in a user namespace")
return false, nil
}
supportsQuotasLock.Lock()
defer supportsQuotasLock.Unlock()
if supportsQuotas, ok := supportsQuotasMap[path]; ok {
@ -307,12 +313,12 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
// AssignQuota chooses the quota ID based on the pod UID and path.
// If the pod UID is identical to another one known, it may (but presently
// doesn't) choose the same quota ID as other volumes in the pod.
func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck
func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity, userNamespacesEnabled bool) error { //nolint:staticcheck
if bytes == nil {
return fmt.Errorf("attempting to assign null quota to %s", path)
}
ibytes := bytes.Value()
if ok, err := SupportsQuotas(m, path); !ok {
if ok, err := SupportsQuotas(m, path, userNamespacesEnabled); !ok {
return fmt.Errorf("quotas not supported on %s: %v", path, err)
}
quotaLock.Lock()
@ -410,7 +416,7 @@ func GetInodes(path string) (*resource.Quantity, error) {
}
// ClearQuota -- remove the quota assigned to a directory
func ClearQuota(m mount.Interface, path string) error {
func ClearQuota(m mount.Interface, path string, userNamespacesEnabled bool) error {
klog.V(3).Infof("ClearQuota %s", path)
if !enabledQuotasForMonitoring() {
return fmt.Errorf("clearQuota called, but quotas disabled")
@ -426,7 +432,7 @@ func ClearQuota(m mount.Interface, path string) error {
// be found, which needs to be cleaned up.
defer delete(supportsQuotasMap, path)
defer clearApplier(path)
return clearQuotaOnDir(m, path)
return clearQuotaOnDir(m, path, userNamespacesEnabled)
}
_, ok = podQuotaMap[poduid]
if !ok {
@ -443,7 +449,7 @@ func ClearQuota(m mount.Interface, path string) error {
}
count, ok := podDirCountMap[poduid]
if count <= 1 || !ok {
err = clearQuotaOnDir(m, path)
err = clearQuotaOnDir(m, path, userNamespacesEnabled)
// This error should be noted; we still need to clean up
// and otherwise handle in the same way.
if err != nil {

View File

@ -39,12 +39,12 @@ func GetQuotaOnDir(_ mount.Interface, _ string) (common.QuotaID, error) {
}
// SupportsQuotas -- dummy implementation
func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
func SupportsQuotas(_ mount.Interface, _ string, _ bool) (bool, error) {
return false, errNotImplemented
}
// AssignQuota -- dummy implementation
func AssignQuota(_ mount.Interface, _ string, _ types.UID, _ *resource.Quantity) error {
func AssignQuota(_ mount.Interface, _ string, _ types.UID, _ *resource.Quantity, _ bool) error {
return errNotImplemented
}
@ -59,6 +59,6 @@ func GetInodes(_ string) (*resource.Quantity, error) {
}
// ClearQuota -- dummy implementation
func ClearQuota(_ mount.Interface, _ string) error {
func ClearQuota(_ mount.Interface, _ string, _ bool) error {
return errNotImplemented
}
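A hedged, caller-side sketch of the new userNamespacesEnabled parameter; assignQuotaForPod is a hypothetical helper, and the real kubelet wiring may derive the value differently from how it is read off the pod spec here.

// Sketch only (hypothetical wiring): threading userNamespacesEnabled through
// from the pod. A pod is treated as running in a user namespace when its
// SecurityContext.HostUsers field is explicitly false.
func assignQuotaForPod(m mount.Interface, path string, pod *v1.Pod, limit *resource.Quantity) error {
	sc := pod.Spec.SecurityContext
	userNamespacesEnabled := sc != nil && sc.HostUsers != nil && !*sc.HostUsers
	if ok, err := fsquota.SupportsQuotas(m, path, userNamespacesEnabled); !ok || err != nil {
		return err
	}
	return fsquota.AssignQuota(m, path, pod.UID, limit, userNamespacesEnabled)
}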

View File

@ -40,6 +40,8 @@ var (
knownResizeConditions = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
v1.PersistentVolumeClaimControllerResizeError: true,
v1.PersistentVolumeClaimNodeResizeError: true,
}
// AnnPreResizeCapacity annotation is added to a PV when expanding volume.
@ -140,7 +142,7 @@ func MarkResizeInProgressWithResizer(
}
conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
newPVC := pvc.DeepCopy()
newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
newPVC = setResizer(newPVC, resizerName)
return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
}
@ -154,7 +156,7 @@ func MarkControllerReisizeInProgress(pvc *v1.PersistentVolumeClaim, resizerName
}
conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
newPVC := pvc.DeepCopy()
newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
newPVC = MergeResizeConditionOnPVC(newPVC, conditions, false /* keepOldResizeConditions */)
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimControllerResizeInProgress)
newPVC = mergeStorageAllocatedResources(newPVC, newSize)
newPVC = setResizer(newPVC, resizerName)
@ -196,7 +198,7 @@ func MarkForFSResize(
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizePending)
}
newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
newPVC = MergeResizeConditionOnPVC(newPVC, conditions, true /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
@ -229,16 +231,25 @@ func MarkFSResizeFinished(
}
}
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{}, false /* keepOldResizeConditions */)
updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return updatedPVC, err
}
// MarkNodeExpansionFailed marks a PVC for node expansion as failed. Kubelet should not retry expansion
// MarkNodeExpansionInfeasible marks node expansion of a PVC as infeasible. Kubelet should not retry expansion
// of volumes which are in this state.
func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
func MarkNodeExpansionInfeasible(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeFailed)
newPVC = mergeStorageResourceStatus(newPVC, v1.PersistentVolumeClaimNodeResizeInfeasible)
errorCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimNodeResizeError,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: fmt.Sprintf("failed to expand pvc with %v", err),
}
newPVC = MergeResizeConditionOnPVC(newPVC,
[]v1.PersistentVolumeClaimCondition{errorCondition},
true /* keepOldResizeConditions */)
patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
if err != nil {
@ -253,6 +264,30 @@ func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset
return updatedClaim, nil
}
func MarkNodeExpansionFailedCondition(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface, err error) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
errorCondition := v1.PersistentVolumeClaimCondition{
Type: v1.PersistentVolumeClaimNodeResizeError,
Status: v1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Message: fmt.Sprintf("failed to expand pvc with %v", err),
}
newPVC = MergeResizeConditionOnPVC(newPVC,
[]v1.PersistentVolumeClaimCondition{errorCondition},
true /* keepOldResizeConditions */)
patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
if err != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if updateErr != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %w", pvc.Name, updateErr)
}
return updatedClaim, nil
}
// MarkNodeExpansionInProgress marks pvc expansion in progress on node
func MarkNodeExpansionInProgress(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
newPVC := pvc.DeepCopy()
@ -333,7 +368,7 @@ func addResourceVersion(patchBytes []byte, resourceVersion string) ([]byte, erro
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
resizeConditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
resizeConditions []v1.PersistentVolumeClaimCondition, keepOldResizeConditions bool) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
@ -356,6 +391,10 @@ func MergeResizeConditionOnPVC(
newConditions = append(newConditions, condition)
}
newCondition.processed = true
} else if keepOldResizeConditions {
// if keepOldResizeConditions is true, we keep the old resize conditions that were present in the
// existing pvc.Status.Conditions field.
newConditions = append(newConditions, condition)
}
}

View File

@ -102,6 +102,27 @@ func IsFailedPreconditionError(err error) bool {
return errors.As(err, &failedPreconditionError)
}
// InfeasibleError errors are a subset of OperationFinished or final error
// codes. In terms of CSI, this usually means that the operation is not possible
// in the current state with the given arguments.
type InfeasibleError struct {
msg string
}
func (err *InfeasibleError) Error() string {
return err.msg
}
// NewInfeasibleError returns a new instance of InfeasibleError
func NewInfeasibleError(msg string) *InfeasibleError {
return &InfeasibleError{msg: msg}
}
// IsInfeasibleError returns true if the given error is, or wraps, an InfeasibleError.
func IsInfeasibleError(err error) bool {
var infeasibleError *InfeasibleError
return errors.As(err, &infeasibleError)
}
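A short, hedged sketch of the intended consumption pattern, with handleResizeError standing in for whatever reconciliation loop receives the error:

// Sketch only: callers can use IsInfeasibleError to decide whether a resize is worth retrying.
func handleResizeError(err error) (retry bool) {
	if err == nil {
		return false
	}
	if IsInfeasibleError(err) {
		// Infeasible with the current arguments: record it on the PVC instead of requeueing.
		return false
	}
	// Everything else is treated as transient and retried.
	return true
}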
type OperationNotSupported struct {
msg string
}

View File

@ -22,12 +22,10 @@ import (
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"time"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
@ -36,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@ -46,7 +43,6 @@ import (
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
"k8s.io/mount-utils"
utilexec "k8s.io/utils/exec"
"k8s.io/utils/io"
utilstrings "k8s.io/utils/strings"
)
@ -59,10 +55,6 @@ const (
// managed by the attach/detach controller
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
// that decides if pod volumes are unmounted when pod is terminated
KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"
// MountsInGlobalPDPath is name of the directory appended to a volume plugin
// name to create the place for volume mounts in the global PD path.
MountsInGlobalPDPath = "mounts"
@ -115,22 +107,6 @@ func SetReady(dir string) {
file.Close()
}
// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
if kubeClient == nil {
return secret, fmt.Errorf("cannot get kube client")
}
secrets, err := kubeClient.CoreV1().Secrets(pod.Namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
if err != nil {
return secret, err
}
for name, data := range secrets.Data {
secret[name] = string(data)
}
return secret, nil
}
// GetSecretForPV locates secret by name and namespace, verifies the secret type, and returns secret map
func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
@ -150,23 +126,6 @@ func GetSecretForPV(secretNamespace, secretName, volumePluginName string, kubeCl
return secret, nil
}
// GetClassForVolume locates storage class by persistent volume
func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume) (*storage.StorageClass, error) {
if kubeClient == nil {
return nil, fmt.Errorf("cannot get kube client")
}
className := storagehelpers.GetPersistentVolumeClass(pv)
if className == "" {
return nil, fmt.Errorf("volume has no storage class")
}
class, err := kubeClient.StorageV1().StorageClasses().Get(context.TODO(), className, metav1.GetOptions{})
if err != nil {
return nil, err
}
return class, nil
}
// LoadPodFromFile will read, decode, and return a Pod from a file.
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
if filePath == "" {
@ -204,22 +163,6 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.Pers
return timeout
}
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
prefix := clusterName + "-dynamic"
pvLen := len(pvName)
// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
// +1 for the '-' dash
if pvLen+1+len(prefix) > maxLength {
prefix = prefix[:maxLength-pvLen-1]
}
return prefix + "-" + pvName
}
// GetPath checks if the path from the mounter is empty.
func GetPath(mounter volume.Mounter) (string, error) {
path := mounter.GetPath()
@ -263,7 +206,7 @@ func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
allMountOptions := sets.NewString()
allMountOptions := sets.New[string]()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
@ -274,7 +217,7 @@ func JoinMountOptions(userOptions []string, systemOptions []string) []string {
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return allMountOptions.List()
return sets.List(allMountOptions)
}
// ContainsAccessMode returns whether the requested mode is contained by modes
@ -558,13 +501,6 @@ func UnmapBlockVolume(
return nil
}
// GetPluginMountDir returns the global mount directory name appended
// to the given plugin name's plugin directory
func GetPluginMountDir(host volume.VolumeHost, name string) string {
mntDir := filepath.Join(host.GetPluginDir(name), MountsInGlobalPDPath)
return mntDir
}
// IsLocalEphemeralVolume determines whether the argument is a local ephemeral
// volume vs. some other type
// Local means the volume is using storage from the local disk that is managed by kubelet.
@ -616,9 +552,9 @@ func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as a filesystem mount or a raw block device, together with a list
// of all SELinux contexts of all containers that use the volumes.
func GetPodVolumeNames(pod *v1.Pod) (mounts sets.String, devices sets.String, seLinuxContainerContexts map[string][]*v1.SELinuxOptions) {
mounts = sets.NewString()
devices = sets.NewString()
func GetPodVolumeNames(pod *v1.Pod) (mounts sets.Set[string], devices sets.Set[string], seLinuxContainerContexts map[string][]*v1.SELinuxOptions) {
mounts = sets.New[string]()
devices = sets.New[string]()
seLinuxContainerContexts = make(map[string][]*v1.SELinuxOptions)
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
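The new signature returns generic sets, so callers use Has/Len on sets.Set[string] instead of the old sets.String methods. A hedged caller-side sketch, compilable within a tree that vendors k8s.io/kubernetes as this repository does; checkPodUsesVolume is a hypothetical helper, not part of this package:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// checkPodUsesVolume reports whether the named volume is consumed by the pod,
// either as a filesystem mount or as a raw block device.
func checkPodUsesVolume(pod *v1.Pod, volumeName string) bool {
	mounts, devices, _ := volumeutil.GetPodVolumeNames(pod)
	return mounts.Has(volumeName) || devices.Has(volumeName)
}

func main() {
	pod := &v1.Pod{} // an empty pod uses no volumes
	fmt.Println(checkPodUsesVolume(pod, "data")) // false
}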
@ -653,8 +589,7 @@ func GetPodVolumeNames(pod *v1.Pod) (mounts sets.String, devices sets.String, se
// attributes.
func FsUserFrom(pod *v1.Pod) *int64 {
var fsUser *int64
// Exclude ephemeral containers because SecurityContext is not allowed.
podutil.VisitContainers(&pod.Spec, podutil.InitContainers|podutil.Containers, func(container *v1.Container, containerType podutil.ContainerType) bool {
podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(container *v1.Container, containerType podutil.ContainerType) bool {
runAsUser, ok := securitycontext.DetermineEffectiveRunAsUser(pod, container)
// A container doesn't specify a user, or there is more than one
// non-root UID.
@ -705,25 +640,6 @@ func HasMountRefs(mountPath string, mountRefs []string) bool {
return false
}
// WriteVolumeCache flush disk data given the specified mount path
func WriteVolumeCache(deviceMountPath string, exec utilexec.Interface) error {
// If runtime os is windows, execute Write-VolumeCache powershell command on the disk
if runtime.GOOS == "windows" {
cmdString := "Get-Volume -FilePath $env:mountpath | Write-Volumecache"
cmd := exec.Command("powershell", "/c", cmdString)
env := append(os.Environ(), fmt.Sprintf("mountpath=%s", deviceMountPath))
cmd.SetEnv(env)
klog.V(8).Infof("Executing command: %q", cmdString)
output, err := cmd.CombinedOutput()
klog.Infof("command (%q) execeuted: %v, output: %q", cmdString, err, string(output))
if err != nil {
return fmt.Errorf("command (%q) failed: %v, output: %q", cmdString, err, string(output))
}
}
// On Linux this is skipped because unmount automatically flushes disk data
return nil
}
// IsMultiAttachAllowed checks if attaching this volume to multiple nodes is definitely not allowed/possible.
// In its current form, this function can only reliably say for which volumes it's definitely forbidden. If it returns
// false, it is not guaranteed that multi-attach is actually supported by the volume type and we must rely on the

View File

@ -19,10 +19,10 @@ package util
import (
"sort"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
storagev1alpha1listers "k8s.io/client-go/listers/storage/v1alpha1"
storagev1beta1listers "k8s.io/client-go/listers/storage/v1beta1"
"k8s.io/klog/v2"
)
@ -32,13 +32,13 @@ const (
)
// GetDefaultVolumeAttributesClass returns the default VolumeAttributesClass from the store, or nil.
func GetDefaultVolumeAttributesClass(lister storagev1alpha1listers.VolumeAttributesClassLister, driverName string) (*storagev1alpha1.VolumeAttributesClass, error) {
func GetDefaultVolumeAttributesClass(lister storagev1beta1listers.VolumeAttributesClassLister, driverName string) (*storagev1beta1.VolumeAttributesClass, error) {
list, err := lister.List(labels.Everything())
if err != nil {
return nil, err
}
defaultClasses := []*storagev1alpha1.VolumeAttributesClass{}
defaultClasses := []*storagev1beta1.VolumeAttributesClass{}
for _, class := range list {
if IsDefaultVolumeAttributesClassAnnotation(class.ObjectMeta) && class.DriverName == driverName {
defaultClasses = append(defaultClasses, class)
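With VolumeAttributesClass promoted from storage.k8s.io/v1alpha1 to v1beta1 in this Kubernetes release, callers switch to the v1beta1 lister shown above. A compile-only sketch of consuming it; classNamesForDriver is a hypothetical helper, and the lister is assumed to be wired up from an informer elsewhere:

package volumeattributesclass

import (
	"k8s.io/apimachinery/pkg/labels"
	storagev1beta1listers "k8s.io/client-go/listers/storage/v1beta1"
)

// classNamesForDriver lists all VolumeAttributesClasses through the v1beta1
// lister and returns the names of those that belong to the given CSI driver.
func classNamesForDriver(lister storagev1beta1listers.VolumeAttributesClassLister, driverName string) ([]string, error) {
	classes, err := lister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	var names []string
	for _, class := range classes {
		if class.DriverName == driverName {
			names = append(names, class.Name)
		}
	}
	return names, nil
}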

View File

@ -284,14 +284,6 @@ type DeviceMounter interface {
MountDevice(spec *Spec, devicePath string, deviceMountPath string, deviceMounterArgs DeviceMounterArgs) error
}
type BulkVolumeVerifier interface {
// BulkVerifyVolumes checks whether the listed volumes are still attached to the
// nodes in the cluster. It returns a map from node name to the per-volume check result.
// If an error occurs during the check, the error should be returned and the volumes
// on the nodes should be assumed to be still attached.
BulkVerifyVolumes(volumesByNode map[types.NodeName][]*Spec) (map[types.NodeName]map[*Spec]bool, error)
}
// Detacher can detach a volume from a node.
type Detacher interface {
DeviceUnmounter