rebase: bump k8s.io/kubernetes from 1.26.2 to 1.27.2

Bumps [k8s.io/kubernetes](https://github.com/kubernetes/kubernetes) from 1.26.2 to 1.27.2.
- [Release notes](https://github.com/kubernetes/kubernetes/releases)
- [Commits](https://github.com/kubernetes/kubernetes/compare/v1.26.2...v1.27.2)

---
updated-dependencies:
- dependency-name: k8s.io/kubernetes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Author:    dependabot[bot]
Date:      2023-05-29 21:03:29 +00:00
Committed: mergify[bot]
Parent:    0e79135419
Commit:    07b05616a0
1072 changed files with 208716 additions and 198880 deletions

@ -74,7 +74,7 @@ func RequestsOnlyLocalTraffic(service *api.Service) bool {
return false
}
return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyTypeLocal
return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyLocal
}
// NeedsHealthCheck checks if service needs health check.

vendor/k8s.io/kubernetes/pkg/api/service/warnings.go (new file, 99 lines, generated, vendored)

@ -0,0 +1,99 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"fmt"
"net/netip"
"k8s.io/apimachinery/pkg/util/validation/field"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
)
func GetWarningsForService(service, oldService *api.Service) []string {
if service == nil {
return nil
}
var warnings []string
if _, ok := service.Annotations[api.DeprecatedAnnotationTopologyAwareHints]; ok {
warnings = append(warnings, fmt.Sprintf("annotation %s is deprecated, please use %s instead", api.DeprecatedAnnotationTopologyAwareHints, api.AnnotationTopologyMode))
}
if helper.IsServiceIPSet(service) {
for i, clusterIP := range service.Spec.ClusterIPs {
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("clusterIPs").Index(i), clusterIP)...)
}
}
for i, externalIP := range service.Spec.ExternalIPs {
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("externalIPs").Index(i), externalIP)...)
}
if len(service.Spec.LoadBalancerIP) > 0 {
warnings = append(warnings, getWarningsForIP(field.NewPath("spec").Child("loadBalancerIP"), service.Spec.LoadBalancerIP)...)
}
for i, cidr := range service.Spec.LoadBalancerSourceRanges {
warnings = append(warnings, getWarningsForCIDR(field.NewPath("spec").Child("loadBalancerSourceRanges").Index(i), cidr)...)
}
return warnings
}
func getWarningsForIP(fieldPath *field.Path, address string) []string {
// IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17
// This will also warn about possible future changes on the golang std library
// xref: https://issues.k8s.io/108074
ip, err := netip.ParseAddr(address)
if err != nil {
return []string{fmt.Sprintf("%s: IP address was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)}
}
// A Recommendation for IPv6 Address Text Representation
//
// "All of the above examples represent the same IPv6 address. This
// flexibility has caused many problems for operators, systems
// engineers, and customers.
// ..."
// https://datatracker.ietf.org/doc/rfc5952/
if ip.Is6() && ip.String() != address {
return []string{fmt.Sprintf("%s: IPv6 address %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, address, ip.String())}
}
return []string{}
}
func getWarningsForCIDR(fieldPath *field.Path, cidr string) []string {
// IPv4 addresses with leading zeros CVE-2021-29923 are not valid in golang since 1.17
// This will also warn about possible future changes on the golang std library
// xref: https://issues.k8s.io/108074
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
return []string{fmt.Sprintf("%s: IP prefix was accepted, but will be invalid in a future Kubernetes release: %v", fieldPath, err)}
}
// A Recommendation for IPv6 Address Text Representation
//
// "All of the above examples represent the same IPv6 address. This
// flexibility has caused many problems for operators, systems
// engineers, and customers.
// ..."
// https://datatracker.ietf.org/doc/rfc5952/
if prefix.Addr().Is6() && prefix.String() != cidr {
return []string{fmt.Sprintf("%s: IPv6 prefix %q is not in RFC 5952 canonical format (%q), which may cause controller apply-loops", fieldPath, cidr, prefix.String())}
}
return []string{}
}
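A minimal sketch of calling the new exported helper; it only builds inside the kubernetes module (or against a vendored copy such as this one), and the address below is illustrative:

package main

import (
	"fmt"

	svcapi "k8s.io/kubernetes/pkg/api/service"
	api "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	// "2001:DB8::1" parses, but its RFC 5952 canonical form is "2001:db8::1",
	// so GetWarningsForService emits a warning for spec.externalIPs[0].
	svc := &api.Service{
		Spec: api.ServiceSpec{
			ExternalIPs: []string{"2001:DB8::1"},
		},
	}
	for _, w := range svcapi.GetWarningsForService(svc, nil) {
		fmt.Println(w)
	}
}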

@ -257,7 +257,7 @@ func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool
}
// GetContainerStatus extracts the status of container "name" from "statuses".
// It also returns if "name" exists.
// It returns true if "name" exists, else returns false.
func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
for i := range statuses {
if statuses[i].Name == name {
@ -274,6 +274,17 @@ func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.C
return status
}
// GetIndexOfContainerStatus gets the index of status of container "name" from "statuses",
// It returns (index, true) if "name" exists, else returns (0, false).
func GetIndexOfContainerStatus(statuses []v1.ContainerStatus, name string) (int, bool) {
for i := range statuses {
if statuses[i].Name == name {
return i, true
}
}
return 0, false
}
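A short usage sketch for the new GetIndexOfContainerStatus helper; the podutil import path is an assumption based on where these helpers live upstream:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod" // assumed package path for the helpers above
)

func main() {
	statuses := []v1.ContainerStatus{{Name: "sidecar"}, {Name: "app"}}
	// Returns (index, true) when found, (0, false) otherwise.
	if i, ok := podutil.GetIndexOfContainerStatus(statuses, "app"); ok {
		fmt.Printf("status for %q is at index %d\n", statuses[i].Name, i)
	}
}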
// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:

@ -180,6 +180,7 @@ type StatefulSetSpec struct {
// of the StatefulSet. Each pod will be named with the format
// <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
// "web" with index number "3" would be named "web-3".
// The only allowed template.spec.restartPolicy value is "Always".
Template api.PodTemplateSpec
// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
@ -229,7 +230,7 @@ type StatefulSetSpec struct {
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
// the StatefulSet VolumeClaimTemplates. This requires the
// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
// StatefulSetAutoDeletePVC feature gate to be enabled, which is beta and default on from 1.27.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy
@ -237,7 +238,7 @@ type StatefulSetSpec struct {
// default ordinals behavior assigns a "0" index to the first replica and
// increments the index by one for each additional replica requested. Using
// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
// enabled, which is alpha.
// enabled, which is beta.
// +optional
Ordinals *StatefulSetOrdinals
}
@ -376,6 +377,7 @@ type DeploymentSpec struct {
Selector *metav1.LabelSelector
// Template describes the pods that will be created.
// The only allowed template.spec.restartPolicy value is "Always".
Template api.PodTemplateSpec
// The deployment strategy to use to replace existing pods with new ones.
@ -666,6 +668,7 @@ type DaemonSetSpec struct {
// The DaemonSet will create exactly one copy of this pod on every node
// that matches the template's node selector (or on every node if no node
// selector is specified).
// The only allowed template.spec.restartPolicy value is "Always".
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
Template api.PodTemplateSpec
@ -857,6 +860,7 @@ type ReplicaSetSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// The only allowed template.spec.restartPolicy value is "Always".
// +optional
Template api.PodTemplateSpec
}

@ -31,25 +31,25 @@ type Scale struct {
// +optional
metav1.ObjectMeta
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
// status represents the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus
}
// ScaleSpec describes the attributes of a scale subresource.
type ScaleSpec struct {
// desired number of instances for the scaled object.
// replicas is the desired number of instances for the scaled object.
// +optional
Replicas int32
}
// ScaleStatus represents the current status of a scale subresource.
type ScaleStatus struct {
// actual number of observed instances of the scaled object.
// replicas is the actual number of observed instances of the scaled object.
Replicas int32
// label query over pods that should match the replicas count. This is same
@ -62,20 +62,23 @@ type ScaleStatus struct {
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReference struct {
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
// kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
Kind string
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
// name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
Name string
// API version of the referent
// apiVersion is the API version of the referent
// +optional
APIVersion string
}
// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpec struct {
// ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
// should be collected, as well as to actually change the replica count.
ScaleTargetRef CrossVersionObjectReference
// minReplicas is the lower limit for the number of replicas to which the autoscaler
// can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
// alpha feature gate HPAScaleToZero is enabled and at least one Object or External
@ -83,10 +86,12 @@ type HorizontalPodAutoscalerSpec struct {
// available.
// +optional
MinReplicas *int32
// MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
// It cannot be less that minReplicas.
MaxReplicas int32
// Metrics contains the specifications for which to use to calculate the
// metrics contains the specifications for which to use to calculate the
// desired replica count (the maximum replica count across all metrics will
// be used). The desired replica count is calculated multiplying the
// ratio between the target value and the current value by the current
@ -487,7 +492,7 @@ type PodsMetricStatus struct {
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ResourceMetricStatus struct {
// Name is the name of the resource in question.
// name is the name of the resource in question.
Name api.ResourceName
Current MetricValueStatus
}
@ -498,7 +503,7 @@ type ResourceMetricStatus struct {
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatus struct {
// Name is the name of the resource in question.
// name is the name of the resource in question.
Name api.ResourceName
Container string
Current MetricValueStatus
@ -530,12 +535,12 @@ type HorizontalPodAutoscaler struct {
// +optional
metav1.ObjectMeta
// Spec is the specification for the behaviour of the autoscaler.
// spec is the specification for the behaviour of the autoscaler.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec HorizontalPodAutoscalerSpec
// Status is the current information about the autoscaler.
// status is the current information about the autoscaler.
// +optional
Status HorizontalPodAutoscalerStatus
}
@ -549,6 +554,6 @@ type HorizontalPodAutoscalerList struct {
// +optional
metav1.ListMeta
// Items is the list of horizontal pod autoscaler objects.
// items is the list of horizontal pod autoscaler objects.
Items []HorizontalPodAutoscaler
}

@ -49,7 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Job{},
&JobList{},
&JobTemplate{},
&CronJob{},
&CronJobList{},
)

@ -22,16 +22,29 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
)
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
//
// Additionally, the apiserver and job controller use this string as a Job
// annotation, to mark Jobs that are being tracked using pod finalizers.
// However, this behavior is deprecated in kubernetes 1.26. This means that, in
// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
// apiserver and job controller will ignore this annotation and they will
// always track jobs using finalizers.
const JobTrackingFinalizer = "batch.kubernetes.io/job-tracking"
const (
// Unprefixed labels are reserved for end-users
// so we will add a batch.kubernetes.io to designate these labels as official Kubernetes labels.
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions
labelPrefix = "batch.kubernetes.io/"
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
//
// Additionally, the apiserver and job controller use this string as a Job
// annotation, to mark Jobs that are being tracked using pod finalizers.
// However, this behavior is deprecated in kubernetes 1.26. This means that, in
// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
// apiserver and job controller will ignore this annotation and they will
// always track jobs using finalizers.
JobTrackingFinalizer = labelPrefix + "job-tracking"
// LegacyJobName and LegacyControllerUid are legacy labels that were set using unprefixed labels.
LegacyJobNameLabel = "job-name"
LegacyControllerUidLabel = "controller-uid"
// JobName is a user friendly way to refer to jobs and is set in the labels for jobs.
JobNameLabel = labelPrefix + LegacyJobNameLabel
// Controller UID is used for selectors and labels for jobs
ControllerUidLabel = labelPrefix + LegacyControllerUidLabel
)
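For example, a client could select a Job's pods by the new prefixed label key defined above (a hedged sketch; the value "demo-job" is illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// JobNameLabel from the const block above resolves to "batch.kubernetes.io/job-name".
	sel := labels.SelectorFromSet(labels.Set{
		"batch.kubernetes.io/job-name": "demo-job",
	})
	fmt.Println(sel.String()) // batch.kubernetes.io/job-name=demo-job
}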
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -68,22 +81,6 @@ type JobList struct {
Items []Job
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// JobTemplate describes a template for creating copies of a predefined pod.
type JobTemplate struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Defines jobs that will be created from this template.
// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Template JobTemplateSpec
}
// JobTemplateSpec describes the data a Job should have when created from a template
type JobTemplateSpec struct {
// Standard object's metadata of the jobs created from this template.
@ -158,6 +155,7 @@ type PodFailurePolicyOnExitCodesRequirement struct {
// Represents the relationship between the container exit code(s) and the
// specified values. Containers completed with success (exit code 0) are
// excluded from the requirement check. Possible values are:
//
// - In: the requirement is satisfied if at least one container exit code
// (might be multiple if there are multiple containers not restricted
// by the 'containerName' field) is in the set of specified values.
@ -194,6 +192,7 @@ type PodFailurePolicyOnPodConditionsPattern struct {
type PodFailurePolicyRule struct {
// Specifies the action taken on a pod failure when the requirements are satisfied.
// Possible values are:
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
// - Ignore: indicates that the counter towards the .backoffLimit is not
@ -237,7 +236,7 @@ type JobSpec struct {
Parallelism *int32
// Specifies the desired number of successfully finished pods the
// job should be run with. Setting to nil means that the success of any
// job should be run with. Setting to null means that the success of any
// pod signals the success of all pods, and allows parallelism to have any positive
// value. Setting to 1 means that parallelism is limited to 1 and the success of that
// pod signals the success of the job.
@ -293,6 +292,7 @@ type JobSpec struct {
ManualSelector *bool
// Describes the pod that will be created when executing a job.
// The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
Template api.PodTemplateSpec
// ttlSecondsAfterFinished limits the lifetime of a Job that has finished
@ -305,7 +305,7 @@ type JobSpec struct {
// +optional
TTLSecondsAfterFinished *int32
// CompletionMode specifies how Pod completions are tracked. It can be
// completionMode specifies how Pod completions are tracked. It can be
// `NonIndexed` (default) or `Indexed`.
//
// `NonIndexed` means that the Job is considered complete when there have
@ -330,7 +330,7 @@ type JobSpec struct {
// +optional
CompletionMode *CompletionMode
// Suspend specifies whether the Job controller should create Pods or not. If
// suspend specifies whether the Job controller should create Pods or not. If
// a Job is created with suspend set to true, no Pods are created by the Job
// controller. If a Job is suspended after creation (i.e. the flag goes from
// false to true), the Job controller will delete all active Pods associated
@ -387,7 +387,7 @@ type JobStatus struct {
// +optional
Failed int32
// CompletedIndexes holds the completed indexes when .spec.completionMode =
// completedIndexes holds the completed indexes when .spec.completionMode =
// "Indexed" in a text format. The indexes are represented as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
// more consecutive numbers are compressed and represented by the first and
@ -397,15 +397,16 @@ type JobStatus struct {
// +optional
CompletedIndexes string
// UncountedTerminatedPods holds the UIDs of Pods that have terminated but
// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
// The job controller creates pods with a finalizer. When a pod terminates
// (succeeded or failed), the controller does three steps to account for it
// in the job status:
// (1) Add the pod UID to the corresponding array in this field.
// (2) Remove the pod finalizer.
// (3) Remove the pod UID from the array while increasing the corresponding
//
// 1. Add the pod UID to the corresponding array in this field.
// 2. Remove the pod finalizer.
// 3. Remove the pod UID from the array while increasing the corresponding
// counter.
//
// Old jobs might not be tracked using this field, in which case the field
@ -417,12 +418,12 @@ type JobStatus struct {
// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
// been accounted in Job status counters.
type UncountedTerminatedPods struct {
// Succeeded holds UIDs of succeeded Pods.
// succeeded holds UIDs of succeeded Pods.
// +listType=set
// +optional
Succeeded []types.UID
// Failed holds UIDs of failed Pods.
// failed holds UIDs of failed Pods.
// +listType=set
// +optional
Failed []types.UID
@ -513,7 +514,6 @@ type CronJobSpec struct {
// configuration, the controller will stop creating new new Jobs and will create a system event with the
// reason UnknownTimeZone.
// More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
// This is beta field and must be enabled via the `CronJobTimeZone` feature gate.
// +optional
TimeZone *string
@ -524,6 +524,7 @@ type CronJobSpec struct {
// Specifies how to treat concurrent executions of a Job.
// Valid values are:
//
// - "Allow" (default): allows CronJobs to run concurrently;
// - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
// - "Replace": cancels currently running job and replaces it with a new one

@ -347,33 +347,6 @@ func (in *JobStatus) DeepCopy() *JobStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobTemplate) DeepCopyInto(out *JobTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Template.DeepCopyInto(&out.Template)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate.
func (in *JobTemplate) DeepCopy() *JobTemplate {
if in == nil {
return nil
}
out := new(JobTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *JobTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) {
*out = *in

@ -122,8 +122,24 @@ const (
// This annotation is beta-level and is only honored when PodDeletionCost feature is enabled.
PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
// AnnotationTopologyAwareHints can be used to enable or disable Topology
// Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any
// other value is treated as "Disabled".
AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
// DeprecatedAnnotationTopologyAwareHints can be used to enable or disable
// Topology Aware Hints for a Service. This may be set to "Auto" or
// "Disabled". Any other value is treated as "Disabled". This annotation has
// been deprecated in favor of the `service.kubernetes.io/topology-mode`
// annotation which also allows "Auto" and "Disabled", but is not limited to
// those (it's open ended to provide room for experimentation while we
// pursue configuration for topology via specification). When both
// `service.kubernetes.io/topology-aware-hints` and
// `service.kubernetes.io/topology-mode` annotations are set, the value of
// `service.kubernetes.io/topology-aware-hints` has precedence.
DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
// AnnotationTopologyMode can be used to enable or disable Topology Aware
// Routing for a Service. Well known values are "Auto" and "Disabled".
// Implementations may choose to develop new topology approaches, exposing
// them with domain-prefixed values. For example, "example.com/lowest-rtt"
// could be a valid implementation-specific value for this annotation. These
// heuristics will often populate topology hints on EndpointSlices, but that
// is not a requirement.
AnnotationTopologyMode = "service.kubernetes.io/topology-mode"
)
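A small sketch of opting a Service into Topology Aware Routing using the non-deprecated annotation key defined above; the Service name is illustrative and the literal string matches AnnotationTopologyMode:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "demo",
			Annotations: map[string]string{
				// Prefer this over the deprecated service.kubernetes.io/topology-aware-hints key.
				"service.kubernetes.io/topology-mode": "Auto",
			},
		},
	}
	fmt.Println(svc.Annotations)
}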

@ -0,0 +1,102 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// NOTE: DO NOT use those helper functions through client-go, the
// package path will be changed in the future.
package qos
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/apis/core"
)
var supportedQoSComputeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
func isSupportedQoSComputeResource(name core.ResourceName) bool {
return supportedQoSComputeResources.Has(string(name))
}
// GetPodQOS returns the QoS class of a pod.
// A pod is besteffort if none of its containers have specified any requests or limits.
// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
// A pod is burstable if limits and requests do not match across all containers.
// When this function is updated please also update staging/src/k8s.io/kubectl/pkg/util/qos/qos.go
func GetPodQOS(pod *core.Pod) core.PodQOSClass {
requests := core.ResourceList{}
limits := core.ResourceList{}
zeroQuantity := resource.MustParse("0")
isGuaranteed := true
// note, ephemeral containers are not considered for QoS as they cannot define resources
allContainers := []core.Container{}
allContainers = append(allContainers, pod.Spec.Containers...)
allContainers = append(allContainers, pod.Spec.InitContainers...)
for _, container := range allContainers {
// process requests
for name, quantity := range container.Resources.Requests {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
delta := quantity.DeepCopy()
if _, exists := requests[name]; !exists {
requests[name] = delta
} else {
delta.Add(requests[name])
requests[name] = delta
}
}
}
// process limits
qosLimitsFound := sets.NewString()
for name, quantity := range container.Resources.Limits {
if !isSupportedQoSComputeResource(name) {
continue
}
if quantity.Cmp(zeroQuantity) == 1 {
qosLimitsFound.Insert(string(name))
delta := quantity.DeepCopy()
if _, exists := limits[name]; !exists {
limits[name] = delta
} else {
delta.Add(limits[name])
limits[name] = delta
}
}
}
if !qosLimitsFound.HasAll(string(core.ResourceMemory), string(core.ResourceCPU)) {
isGuaranteed = false
}
}
if len(requests) == 0 && len(limits) == 0 {
return core.PodQOSBestEffort
}
// Check is requests match limits for all resources.
if isGuaranteed {
for name, req := range requests {
if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
isGuaranteed = false
break
}
}
}
if isGuaranteed &&
len(requests) == len(limits) {
return core.PodQOSGuaranteed
}
return core.PodQOSBurstable
}
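A hedged usage sketch of GetPodQOS; the import path k8s.io/kubernetes/pkg/apis/core/helper/qos is assumed from where this package lives upstream:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/helper/qos" // assumed package path for the file above
)

func main() {
	pod := &core.Pod{
		Spec: core.PodSpec{
			Containers: []core.Container{{
				Name: "app",
				Resources: core.ResourceRequirements{
					// CPU request with no matching limit: not Guaranteed, not BestEffort.
					Requests: core.ResourceList{core.ResourceCPU: resource.MustParse("500m")},
				},
			}},
		},
	}
	fmt.Println(qos.GetPodQOS(pod)) // Burstable
}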

@ -682,7 +682,7 @@ type EmptyDirVolumeSource struct {
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
SizeLimit *resource.Quantity
}
@ -1741,7 +1741,6 @@ type CSIPersistentVolumeSource struct {
// ControllerExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerExpandVolume call.
// This is an beta field and requires enabling ExpandCSIVolumes feature gate.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
@ -1750,9 +1749,10 @@ type CSIPersistentVolumeSource struct {
// NodeExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodeExpandVolume call.
// This is an alpha field and requires enabling CSINodeExpandSecret feature gate.
// This is a beta field which is enabled default by CSINodeExpandSecret feature gate.
// This field is optional, may be omitted if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +featureGate=CSINodeExpandSecret
// +optional
NodeExpandSecretRef *SecretReference
}
@ -2037,7 +2037,8 @@ type SecretEnvSource struct {
// HTTPHeader describes a custom header to be used in HTTP probes
type HTTPHeader struct {
// The header field name
// The header field name.
// This will be canonicalized upon output, so case-variant names will be understood as the same header.
Name string
// The header field value
Value string
@ -2139,6 +2140,33 @@ const (
PullIfNotPresent PullPolicy = "IfNotPresent"
)
// ResourceResizeRestartPolicy specifies how to handle container resource resize.
type ResourceResizeRestartPolicy string
// These are the valid resource resize restart policy values:
const (
// 'NotRequired' means Kubernetes will try to resize the container
// without restarting it, if possible. Kubernetes may however choose to
// restart the container if it is unable to actuate resize without a
// restart. For e.g. the runtime doesn't support restart-free resizing.
NotRequired ResourceResizeRestartPolicy = "NotRequired"
// 'RestartContainer' means Kubernetes will resize the container in-place
// by stopping and starting the container when new resources are applied.
// This is needed for legacy applications. For e.g. java apps using the
// -xmxN flag which are unable to use resized memory without restarting.
RestartContainer ResourceResizeRestartPolicy = "RestartContainer"
)
// ContainerResizePolicy represents resource resize policy for the container.
type ContainerResizePolicy struct {
// Name of the resource to which this resource resize policy applies.
// Supported values: cpu, memory.
ResourceName ResourceName
// Restart policy to apply when specified resource is resized.
// If not specified, it defaults to NotRequired.
RestartPolicy ResourceResizeRestartPolicy
}
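As an illustration of the new fields (behind the InPlacePodVerticalScaling feature gate), a container can ask for restart-free CPU resizes while requiring a restart for memory changes; the image name is a placeholder:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	c := core.Container{
		Name:  "app",
		Image: "registry.example/app:1.0", // placeholder image
		ResizePolicy: []core.ContainerResizePolicy{
			{ResourceName: core.ResourceCPU, RestartPolicy: core.NotRequired},
			// e.g. a JVM that sizes its heap at startup needs a restart to use new memory.
			{ResourceName: core.ResourceMemory, RestartPolicy: core.RestartContainer},
		},
	}
	fmt.Println(len(c.ResizePolicy))
}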
// PreemptionPolicy describes a policy for if/when to preempt a pod.
type PreemptionPolicy string
@ -2191,7 +2219,7 @@ type ResourceRequirements struct {
// This is an alpha field and requires enabling the
// DynamicResourceAllocation feature gate.
//
// This field is immutable.
// This field is immutable. It can only be set for containers.
//
// +featureGate=DynamicResourceAllocation
// +optional
@ -2247,6 +2275,10 @@ type Container struct {
// Compute resource requirements.
// +optional
Resources ResourceRequirements
// Resources resize policy for the container.
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
// +optional
VolumeMounts []VolumeMount
// volumeDevices is the list of block devices to be used by the container.
@ -2296,8 +2328,6 @@ type ProbeHandler struct {
TCPSocket *TCPSocketAction
// GRPC specifies an action involving a GRPC port.
// This is a beta field and requires enabling GRPCContainerProbe feature gate.
// +featureGate=GRPCContainerProbe
// +optional
GRPC *GRPCAction
}
@ -2413,24 +2443,68 @@ type ContainerState struct {
Terminated *ContainerStateTerminated
}
// ContainerStatus represents the status of a container
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
// Each container in a pod must have a unique name.
// Name is a DNS_LABEL representing the unique name of the container.
// Each container in a pod must have a unique name across all container types.
// Cannot be updated.
Name string
// State holds details about the container's current condition.
// +optional
State ContainerState
// LastTerminationState holds the last termination state of the container to
// help debug container crashes and restarts. This field is not
// populated if the container is still running and RestartCount is 0.
// +optional
LastTerminationState ContainerState
// Ready specifies whether the container has passed its readiness check.
// Ready specifies whether the container is currently passing its readiness check.
// The value will change as readiness probes keep executing. If no readiness
// probes are specified, this field defaults to true once the container is
// fully started (see Started field).
//
// The value is typically used to determine whether a container is ready to
// accept traffic.
Ready bool
// Note that this is calculated from dead containers. But those containers are subject to
// garbage collection. This value will get capped at 5 by GC.
// RestartCount holds the number of times the container has been restarted.
// Kubelet makes an effort to always increment the value, but there
// are cases when the state may be lost due to node restarts and then the value
// may be reset to 0. The value is never negative.
RestartCount int32
Image string
ImageID string
// Image is the name of container image that the container is running.
// The container image may not match the image used in the PodSpec,
// as it may have been resolved by the runtime.
// More info: https://kubernetes.io/docs/concepts/containers/images.
Image string
// ImageID is the image ID of the container's image. The image ID may not
// match the image ID of the image used in the PodSpec, as it may have been
// resolved by the runtime.
ImageID string
// ContainerID is the ID of the container in the format '<type>://<container_id>'.
// Where type is a container runtime identifier, returned from Version call of CRI API
// (for example "containerd").
// +optional
ContainerID string
Started *bool
// Started indicates whether the container has finished its postStart lifecycle hook
// and passed its startup probe.
// Initialized as false, becomes true after startupProbe is considered
// successful. Resets to false when the container is restarted, or if kubelet
// loses state temporarily. In both cases, startup probes will run again.
// Is always true when no startupProbe is defined and container is running and
// has passed the postStart lifecycle hook. The null value must be treated the
// same as false.
// +optional
Started *bool
// AllocatedResources represents the compute resources allocated for this container by the
// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
// and after successfully admitting desired pod resize.
// +featureGate=InPlacePodVerticalScaling
// +optional
AllocatedResources ResourceList
// Resources represents the compute resource requests and limits that have been successfully
// enacted on the running container after it has been started or has been successfully resized.
// +featureGate=InPlacePodVerticalScaling
// +optional
Resources *ResourceRequirements
}
// PodPhase is a label for the condition of a pod at the current time.
@ -2496,6 +2570,20 @@ type PodCondition struct {
Message string
}
// PodResizeStatus shows status of desired resize of a pod's containers.
type PodResizeStatus string
const (
// Pod resources resize has been requested and will be evaluated by node.
PodResizeStatusProposed PodResizeStatus = "Proposed"
// Pod resources resize has been accepted by node and is being actuated.
PodResizeStatusInProgress PodResizeStatus = "InProgress"
// Node cannot resize the pod at this time and will keep retrying.
PodResizeStatusDeferred PodResizeStatus = "Deferred"
// Requested pod resize is not feasible and will not be re-evaluated.
PodResizeStatusInfeasible PodResizeStatus = "Infeasible"
)
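A small helper sketch mapping the new status values for callers that poll a resized pod; the message wording is illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

// describeResize turns pod.Status.Resize into a human-readable note.
func describeResize(pod *core.Pod) string {
	switch pod.Status.Resize {
	case core.PodResizeStatusProposed:
		return "resize requested, awaiting evaluation by the node"
	case core.PodResizeStatusInProgress:
		return "node is actuating the resize"
	case core.PodResizeStatusDeferred:
		return "node cannot resize right now and will retry"
	case core.PodResizeStatusInfeasible:
		return "requested resize is not feasible on this node"
	default:
		return "no resize pending"
	}
}

func main() {
	fmt.Println(describeResize(&core.Pod{}))
}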
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
@ -3045,9 +3133,14 @@ type PodSpec struct {
OS *PodOS
// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
// More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.
// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
// scheduler will not attempt to schedule the pod.
//
// This is an alpha-level feature enabled by PodSchedulingReadiness feature gate.
// SchedulingGates can only be set at pod creation time, and be removed only afterwards.
//
// This is a beta feature enabled by the PodSchedulingReadiness feature gate.
//
// +featureGate=PodSchedulingReadiness
// +optional
SchedulingGates []PodSchedulingGate
// ResourceClaims defines which ResourceClaims must be allocated
@ -3408,6 +3501,10 @@ type EphemeralContainerCommon struct {
// already allocated to the pod.
// +optional
Resources ResourceRequirements
// Resources resize policy for the container.
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// +optional
VolumeMounts []VolumeMount
@ -3524,6 +3621,13 @@ type PodStatus struct {
// Status for any ephemeral containers that have run in this pod.
// +optional
EphemeralContainerStatuses []ContainerStatus
// Status of resources resize desired for pod's containers.
// It is empty if no resources resize is pending.
// Any changes to container resources will automatically set this to "Proposed"
// +featureGate=InPlacePodVerticalScaling
// +optional
Resize PodResizeStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -3617,6 +3721,7 @@ type ReplicationControllerSpec struct {
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. Internally, this takes precedence over a
// TemplateRef.
// The only allowed template.spec.restartPolicy value is "Always".
// +optional
Template *PodTemplateSpec
}
@ -3781,33 +3886,33 @@ const (
ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceInternalTrafficPolicyType describes the endpoint-selection policy for
// ServiceInternalTrafficPolicy describes the endpoint-selection policy for
// traffic sent to the ClusterIP.
type ServiceInternalTrafficPolicyType string
type ServiceInternalTrafficPolicy string
const (
// ServiceInternalTrafficPolicyCluster routes traffic to all endpoints.
ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster"
ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster"
// ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same
// node as the traffic was received on (dropping the traffic if there are no
// local endpoints).
ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local"
ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local"
)
// ServiceExternalTrafficPolicyType describes the endpoint-selection policy for
// ServiceExternalTrafficPolicy describes the endpoint-selection policy for
// traffic to external service entrypoints (NodePorts, ExternalIPs, and
// LoadBalancer IPs).
type ServiceExternalTrafficPolicyType string
type ServiceExternalTrafficPolicy string
const (
// ServiceExternalTrafficPolicyTypeCluster routes traffic to all endpoints.
ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
// ServiceExternalTrafficPolicyCluster routes traffic to all endpoints.
ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster"
// ServiceExternalTrafficPolicyTypeLocal preserves the source IP of the traffic by
// ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by
// routing only to endpoints on the same node as the traffic was received on
// (dropping the traffic if there are no local endpoints).
ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local"
)
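The renames above are what drive consumer updates such as the RequestsOnlyLocalTraffic change in the first hunk of this commit; a minimal sketch of the 1.27 spelling against the internal types shown here:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	spec := core.ServiceSpec{
		Type: core.ServiceTypeLoadBalancer,
		// Formerly core.ServiceExternalTrafficPolicyTypeLocal of type ServiceExternalTrafficPolicyType.
		ExternalTrafficPolicy: core.ServiceExternalTrafficPolicyLocal,
	}
	fmt.Println(spec.ExternalTrafficPolicy)
}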
// These are the valid conditions of a service.
@ -4013,7 +4118,7 @@ type ServiceSpec struct {
// a NodePort from within the cluster may need to take traffic policy into account
// when picking a node.
// +optional
ExternalTrafficPolicy ServiceExternalTrafficPolicyType
ExternalTrafficPolicy ServiceExternalTrafficPolicy
// healthCheckNodePort specifies the healthcheck nodePort for the service.
// If not specified, HealthCheckNodePort is created by the service api
@ -4064,7 +4169,7 @@ type ServiceSpec struct {
// "Cluster", uses the standard behavior of routing to all endpoints evenly
// (possibly modified by topology and other features).
// +optional
InternalTrafficPolicy *ServiceInternalTrafficPolicyType
InternalTrafficPolicy *ServiceInternalTrafficPolicy
}
// ServicePort represents the port on which the service is exposed
@ -4079,10 +4184,17 @@ type ServicePort struct {
Protocol Protocol
// The application protocol for this port.
// This is used as a hint for implementations to offer richer behavior for protocols that they understand.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// Valid values are either:
//
// * Un-prefixed protocol names - reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
// +optional
AppProtocol *string
@ -4208,9 +4320,8 @@ type EndpointSubset struct {
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
// The IP of this endpoint.
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, see #4447.
// May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10),
// or link-local multicast (224.0.0.0/24 or ff02::/16).
IP string
// Optional: Hostname of this endpoint
// Meant to be used by DNS servers etc.
@ -5870,8 +5981,12 @@ type TopologySpreadConstraint struct {
// spreading will be calculated. The keys are used to lookup values from the
// incoming pod labels, those key-value labels are ANDed with labelSelector
// to select the group of existing pods over which spreading will be calculated
// for the incoming pod. Keys that don't exist in the incoming pod labels will
// for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
// MatchLabelKeys cannot be set when LabelSelector isn't set.
// Keys that don't exist in the incoming pod labels will
// be ignored. A null or empty list means only match against labelSelector.
//
// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
// +listType=atomic
// +optional
MatchLabelKeys []string
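A hedged example of the semantics described above: combine a labelSelector on the app with matchLabelKeys on pod-template-hash so each rollout's ReplicaSet is spread independently per zone (label keys and values are illustrative):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	tsc := core.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: core.DoNotSchedule,
		LabelSelector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		// Values are looked up from the incoming pod's labels and ANDed with the selector.
		MatchLabelKeys: []string{"pod-template-hash"},
	}
	fmt.Println(tsc.MatchLabelKeys)
}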

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/core"
utilpointer "k8s.io/utils/pointer"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
@ -372,6 +373,11 @@ func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) e
// drop init container annotations so they don't show up as differences when receiving requests from old clients
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1)
}
return nil
}
@ -384,6 +390,11 @@ func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) e
// remove this once the oldest supported kubelet no longer honors the annotations over the field.
out.Annotations = dropInitContainerAnnotations(out.Annotations)
// Forcing the value of TerminationGracePeriodSeconds to 1 if it is negative.
// Just for Pod, not for PodSpec, because we don't want to change the behavior of the PodTemplate.
if in.Spec.TerminationGracePeriodSeconds != nil && *in.Spec.TerminationGracePeriodSeconds < 0 {
out.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1)
}
return nil
}
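The clamp added in both conversion directions can be restated standalone (a sketch; it does not call the generated converters, which require a conversion.Scope):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	utilpointer "k8s.io/utils/pointer"
)

func main() {
	pod := v1.Pod{Spec: v1.PodSpec{TerminationGracePeriodSeconds: utilpointer.Int64(-5)}}
	// Same rule as above: a negative grace period on a Pod becomes 1 second;
	// PodTemplate specs are deliberately left untouched.
	if pod.Spec.TerminationGracePeriodSeconds != nil && *pod.Spec.TerminationGracePeriodSeconds < 0 {
		pod.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(1)
	}
	fmt.Println(*pod.Spec.TerminationGracePeriodSeconds) // 1
}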

@ -22,6 +22,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/parsers"
"k8s.io/utils/pointer"
)
@ -125,7 +127,7 @@ func SetDefaults_Service(obj *v1.Service) {
if (obj.Spec.Type == v1.ServiceTypeNodePort ||
obj.Spec.Type == v1.ServiceTypeLoadBalancer) &&
obj.Spec.ExternalTrafficPolicy == "" {
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyCluster
}
if obj.Spec.InternalTrafficPolicy == nil {
@ -137,7 +139,7 @@ func SetDefaults_Service(obj *v1.Service) {
if obj.Spec.Type == v1.ServiceTypeLoadBalancer {
if obj.Spec.AllocateLoadBalancerNodePorts == nil {
obj.Spec.AllocateLoadBalancerNodePorts = pointer.BoolPtr(true)
obj.Spec.AllocateLoadBalancerNodePorts = pointer.Bool(true)
}
}
}
@ -157,6 +159,29 @@ func SetDefaults_Pod(obj *v1.Pod) {
}
}
}
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
obj.Spec.Containers[i].Resources.Requests != nil {
// For normal containers, set resize restart policy to default value (NotRequired), if not specified.
resizePolicySpecified := make(map[v1.ResourceName]bool)
for _, p := range obj.Spec.Containers[i].ResizePolicy {
resizePolicySpecified[p.ResourceName] = true
}
setDefaultResizePolicy := func(resourceName v1.ResourceName) {
if _, found := resizePolicySpecified[resourceName]; !found {
obj.Spec.Containers[i].ResizePolicy = append(obj.Spec.Containers[i].ResizePolicy,
v1.ContainerResizePolicy{
ResourceName: resourceName,
RestartPolicy: v1.NotRequired,
})
}
}
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceCPU]; exists {
setDefaultResizePolicy(v1.ResourceCPU)
}
if _, exists := obj.Spec.Containers[i].Resources.Requests[v1.ResourceMemory]; exists {
setDefaultResizePolicy(v1.ResourceMemory)
}
}
}
for i := range obj.Spec.InitContainers {
if obj.Spec.InitContainers[i].Resources.Limits != nil {

@ -370,62 +370,3 @@ func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorR
selector = selector.Add(*r)
return selector, nil
}
// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement
// type to a labels.Requirement type.
func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) {
var op selection.Operator
switch nsr.Operator {
case v1.NodeSelectorOpIn:
op = selection.In
case v1.NodeSelectorOpNotIn:
op = selection.NotIn
case v1.NodeSelectorOpExists:
op = selection.Exists
case v1.NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case v1.NodeSelectorOpGt:
op = selection.GreaterThan
case v1.NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator)
}
return labels.NewRequirement(nsr.Key, op, nsr.Values)
}
// NodeSelectorAsSelector converts the NodeSelector api type into a struct that
// implements labels.Selector
// Note: This function should be kept in sync with the selector methods in
// pkg/labels/selector.go
func NodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) {
if ns == nil {
return labels.Nothing(), nil
}
if len(ns.NodeSelectorTerms) == 0 {
return labels.Everything(), nil
}
var requirements []labels.Requirement
for _, nsTerm := range ns.NodeSelectorTerms {
for _, expr := range nsTerm.MatchExpressions {
req, err := nodeSelectorRequirementsAsLabelRequirements(expr)
if err != nil {
return nil, err
}
requirements = append(requirements, *req)
}
for _, field := range nsTerm.MatchFields {
req, err := nodeSelectorRequirementsAsLabelRequirements(field)
if err != nil {
return nil, err
}
requirements = append(requirements, *req)
}
}
selector := labels.NewSelector()
selector = selector.Add(requirements...)
return selector, nil
}

@ -342,6 +342,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ContainerResizePolicy)(nil), (*core.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(a.(*v1.ContainerResizePolicy), b.(*core.ContainerResizePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*core.ContainerResizePolicy)(nil), (*v1.ContainerResizePolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(a.(*core.ContainerResizePolicy), b.(*v1.ContainerResizePolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.ContainerState)(nil), (*core.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_ContainerState_To_core_ContainerState(a.(*v1.ContainerState), b.(*core.ContainerState), scope)
}); err != nil {
@ -2975,6 +2985,7 @@ func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Cont
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -3008,6 +3019,7 @@ func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Cont
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -3079,6 +3091,28 @@ func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out
return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s)
}
func autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *v1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error {
out.ResourceName = core.ResourceName(in.ResourceName)
out.RestartPolicy = core.ResourceResizeRestartPolicy(in.RestartPolicy)
return nil
}
// Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy is an autogenerated conversion function.
func Convert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in *v1.ContainerResizePolicy, out *core.ContainerResizePolicy, s conversion.Scope) error {
return autoConvert_v1_ContainerResizePolicy_To_core_ContainerResizePolicy(in, out, s)
}
func autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *v1.ContainerResizePolicy, s conversion.Scope) error {
out.ResourceName = v1.ResourceName(in.ResourceName)
out.RestartPolicy = v1.ResourceResizeRestartPolicy(in.RestartPolicy)
return nil
}
// Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy is an autogenerated conversion function.
func Convert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in *core.ContainerResizePolicy, out *v1.ContainerResizePolicy, s conversion.Scope) error {
return autoConvert_core_ContainerResizePolicy_To_v1_ContainerResizePolicy(in, out, s)
}
func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error {
out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting))
out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running))
@ -3191,6 +3225,8 @@ func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStat
out.ImageID = in.ImageID
out.ContainerID = in.ContainerID
out.Started = (*bool)(unsafe.Pointer(in.Started))
out.AllocatedResources = *(*core.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*core.ResourceRequirements)(unsafe.Pointer(in.Resources))
return nil
}
@ -3213,6 +3249,8 @@ func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerSt
out.ImageID = in.ImageID
out.ContainerID = in.ContainerID
out.Started = (*bool)(unsafe.Pointer(in.Started))
out.AllocatedResources = *(*v1.ResourceList)(unsafe.Pointer(&in.AllocatedResources))
out.Resources = (*v1.ResourceRequirements)(unsafe.Pointer(in.Resources))
return nil
}
@ -3563,6 +3601,7 @@ func autoConvert_v1_EphemeralContainerCommon_To_core_EphemeralContainerCommon(in
if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]core.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -3596,6 +3635,7 @@ func autoConvert_core_EphemeralContainerCommon_To_v1_EphemeralContainerCommon(in
if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil {
return err
}
out.ResizePolicy = *(*[]v1.ContainerResizePolicy)(unsafe.Pointer(&in.ResizePolicy))
out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices))
out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe))
@ -6380,6 +6420,7 @@ func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodS
out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.QOSClass = core.PodQOSClass(in.QOSClass)
out.EphemeralContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = core.PodResizeStatus(in.Resize)
return nil
}
@ -6396,6 +6437,7 @@ func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodS
out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses))
out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
out.EphemeralContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.EphemeralContainerStatuses))
out.Resize = v1.PodResizeStatus(in.Resize)
return nil
}
@ -7850,7 +7892,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor
out.LoadBalancerIP = in.LoadBalancerIP
out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
out.ExternalName = in.ExternalName
out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy)
out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy)
out.HealthCheckNodePort = in.HealthCheckNodePort
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
@ -7858,7 +7900,7 @@ func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *cor
out.IPFamilyPolicy = (*core.IPFamilyPolicy)(unsafe.Pointer(in.IPFamilyPolicy))
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass))
out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicyType)(unsafe.Pointer(in.InternalTrafficPolicy))
out.InternalTrafficPolicy = (*core.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy))
return nil
}
@ -7881,12 +7923,12 @@ func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v
out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity)
out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig))
out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges))
out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy)
out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicy(in.ExternalTrafficPolicy)
out.HealthCheckNodePort = in.HealthCheckNodePort
out.PublishNotReadyAddresses = in.PublishNotReadyAddresses
out.AllocateLoadBalancerNodePorts = (*bool)(unsafe.Pointer(in.AllocateLoadBalancerNodePorts))
out.LoadBalancerClass = (*string)(unsafe.Pointer(in.LoadBalancerClass))
out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicyType)(unsafe.Pointer(in.InternalTrafficPolicy))
out.InternalTrafficPolicy = (*v1.ServiceInternalTrafficPolicy)(unsafe.Pointer(in.InternalTrafficPolicy))
return nil
}


@ -48,6 +48,7 @@ func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) })
scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) })
scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) })
scheme.AddTypeDefaultingFunc(&v1.PodStatusResult{}, func(obj interface{}) { SetObjectDefaults_PodStatusResult(obj.(*v1.PodStatusResult)) })
scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) })
scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) })
scheme.AddTypeDefaultingFunc(&v1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*v1.ReplicationController)) })
@ -438,6 +439,30 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
}
}
SetDefaults_ResourceList(&in.Spec.Overhead)
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.ContainerStatuses {
a := &in.Status.ContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.EphemeralContainerStatuses {
a := &in.Status.EphemeralContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
}
func SetObjectDefaults_PodList(in *v1.PodList) {
@ -447,6 +472,33 @@ func SetObjectDefaults_PodList(in *v1.PodList) {
}
}
func SetObjectDefaults_PodStatusResult(in *v1.PodStatusResult) {
for i := range in.Status.InitContainerStatuses {
a := &in.Status.InitContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.ContainerStatuses {
a := &in.Status.ContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
for i := range in.Status.EphemeralContainerStatuses {
a := &in.Status.EphemeralContainerStatuses[i]
SetDefaults_ResourceList(&a.AllocatedResources)
if a.Resources != nil {
SetDefaults_ResourceList(&a.Resources.Limits)
SetDefaults_ResourceList(&a.Resources.Requests)
}
}
}
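For context, the generated defaulting functions above all follow the same pattern: walk each container-status slice, default the allocated resources, and, only when a Resources block is present, default its limits and requests as well. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real v1 API (ResourceList, ContainerStatus, and setDefaultsResourceList below are illustrative assumptions, not the Kubernetes types):

package main

import "fmt"

// Simplified stand-ins for the real v1 types (assumption: not the actual API).
type ResourceList map[string]string

type ResourceRequirements struct {
	Limits   ResourceList
	Requests ResourceList
}

type ContainerStatus struct {
	AllocatedResources ResourceList
	Resources          *ResourceRequirements
}

// setDefaultsResourceList mirrors the shape of SetDefaults_ResourceList:
// it normalizes every entry in place (here we just tag the value as a stand-in).
func setDefaultsResourceList(rl ResourceList) {
	for k, v := range rl {
		rl[k] = v + " (defaulted)"
	}
}

// setObjectDefaults walks the statuses the same way the generated code does:
// AllocatedResources is always defaulted, Resources only when non-nil.
func setObjectDefaults(statuses []ContainerStatus) {
	for i := range statuses {
		s := &statuses[i]
		setDefaultsResourceList(s.AllocatedResources)
		if s.Resources != nil {
			setDefaultsResourceList(s.Resources.Limits)
			setDefaultsResourceList(s.Resources.Requests)
		}
	}
}

func main() {
	statuses := []ContainerStatus{
		{AllocatedResources: ResourceList{"cpu": "500m"}},
		{
			AllocatedResources: ResourceList{"memory": "128Mi"},
			Resources: &ResourceRequirements{
				Limits:   ResourceList{"memory": "256Mi"},
				Requests: ResourceList{"memory": "128Mi"},
			},
		},
	}
	setObjectDefaults(statuses)
	fmt.Println(statuses[0].AllocatedResources, statuses[1].Resources.Limits)
}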
func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
SetDefaults_PodSpec(&in.Template.Spec)
for i := range in.Template.Spec.Volumes {


@ -26,6 +26,7 @@ import (
"reflect"
"regexp"
"strings"
"sync"
"unicode"
"unicode/utf8"
@ -36,6 +37,7 @@ import (
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
@ -43,9 +45,11 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
utilfeature "k8s.io/apiserver/pkg/util/feature"
schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
kubeletapis "k8s.io/kubelet/pkg/apis"
apiservice "k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/core/helper"
"k8s.io/kubernetes/pkg/apis/core/helper/qos"
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
corev1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/capabilities"
@ -295,6 +299,14 @@ var ValidateClassName = apimachineryvalidation.NameIsDNSSubdomain
// class name is valid.
var ValidatePriorityClassName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceClaimName can be used to check whether the given
// name for a ResourceClaim is valid.
var ValidateResourceClaimName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateResourceClaimTemplateName can be used to check whether the given
// name for a ResourceClaimTemplate is valid.
var ValidateResourceClaimTemplateName = apimachineryvalidation.NameIsDNSSubdomain
// ValidateRuntimeClassName can be used to check whether the given RuntimeClass name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
@ -1044,10 +1056,7 @@ func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *fi
allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously"))
}
} else if file.ResourceFieldRef != nil {
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixes
if opts.AllowDownwardAPIHugePages {
localValidContainerResourceFieldPathPrefixes = validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
}
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), true)...)
} else {
allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required"))
@ -1531,14 +1540,12 @@ func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistent
// validatePVSecretReference checks whether the provided SecretReference object is valid in terms of secret name and namespace.
func validatePVSecretReference(secretRef *core.SecretReference, allowDNSSubDomainSecretName bool, fldPath *field.Path) field.ErrorList {
func validatePVSecretReference(secretRef *core.SecretReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if len(secretRef.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if allowDNSSubDomainSecretName {
allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...)
} else {
allErrs = append(allErrs, ValidateDNS1123Label(secretRef.Name, fldPath.Child("name"))...)
allErrs = append(allErrs, ValidateDNS1123Subdomain(secretRef.Name, fldPath.Child("name"))...)
}
if len(secretRef.Namespace) == 0 {
@ -1567,7 +1574,7 @@ func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorLi
return allErrs
}
func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, allowDNSSubDomainSecretName bool, fldPath *field.Path) field.ErrorList {
func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
@ -1576,16 +1583,16 @@ func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, allo
allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
}
if csi.ControllerPublishSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, allowDNSSubDomainSecretName, fldPath.Child("controllerPublishSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerPublishSecretRef, fldPath.Child("controllerPublishSecretRef"))...)
}
if csi.ControllerExpandSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, allowDNSSubDomainSecretName, fldPath.Child("controllerExpandSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.ControllerExpandSecretRef, fldPath.Child("controllerExpandSecretRef"))...)
}
if csi.NodePublishSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, allowDNSSubDomainSecretName, fldPath.Child("nodePublishSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.NodePublishSecretRef, fldPath.Child("nodePublishSecretRef"))...)
}
if csi.NodeExpandSecretRef != nil {
allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, allowDNSSubDomainSecretName, fldPath.Child("nodeExpandSecretRef"))...)
allErrs = append(allErrs, validatePVSecretReference(csi.NodeExpandSecretRef, fldPath.Child("nodeExpandSecretRef"))...)
}
return allErrs
}
@ -1647,8 +1654,6 @@ var allowedTemplateObjectMetaFields = map[string]bool{
type PersistentVolumeSpecValidationOptions struct {
// Allow spec to contain the "ReadWiteOncePod" access mode
AllowReadWriteOncePod bool
// Allow the secretRef Name field to be of DNSSubDomain Format
AllowDNSSubDomainSecretName bool
}
// ValidatePersistentVolumeName checks that a name is appropriate for a
@ -1663,8 +1668,7 @@ var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), st
func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) PersistentVolumeSpecValidationOptions {
opts := PersistentVolumeSpecValidationOptions{
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
AllowDNSSubDomainSecretName: false,
AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
}
if oldPv == nil {
// If there's no old PV, use the options based solely on feature enablement
@ -1674,21 +1678,9 @@ func ValidationOptionsForPersistentVolume(pv, oldPv *core.PersistentVolume) Pers
// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
opts.AllowReadWriteOncePod = true
}
if oldCSI := oldPv.Spec.CSI; oldCSI != nil {
opts.AllowDNSSubDomainSecretName =
secretRefRequiresSubdomainSecretName(oldCSI.ControllerExpandSecretRef) ||
secretRefRequiresSubdomainSecretName(oldCSI.ControllerPublishSecretRef) ||
secretRefRequiresSubdomainSecretName(oldCSI.NodeStageSecretRef) ||
secretRefRequiresSubdomainSecretName(oldCSI.NodePublishSecretRef)
}
return opts
}
func secretRefRequiresSubdomainSecretName(secretRef *core.SecretReference) bool {
// ref and name were specified and name didn't fit within label validation
return secretRef != nil && len(secretRef.Name) > 0 && len(validation.IsDNS1123Label(secretRef.Name)) > 0
}
func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName string, validateInlinePersistentVolumeSpec bool, fldPath *field.Path, opts PersistentVolumeSpecValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
@ -1943,7 +1935,7 @@ func ValidatePersistentVolumeSpec(pvSpec *core.PersistentVolumeSpec, pvName stri
allErrs = append(allErrs, field.Forbidden(fldPath.Child("csi"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, opts.AllowDNSSubDomainSecretName, fldPath.Child("csi"))...)
allErrs = append(allErrs, validateCSIPersistentVolumeSource(pvSpec.CSI, fldPath.Child("csi"))...)
}
}
@ -2007,7 +1999,7 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume, opts Pe
// Allow setting NodeAffinity if oldPv NodeAffinity was not set
if oldPv.Spec.NodeAffinity != nil {
allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
allErrs = append(allErrs, validatePvNodeAffinity(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...)
}
return allErrs
@ -2421,8 +2413,6 @@ var validEnvDownwardAPIFieldPathExpressions = sets.NewString(
var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage")
// NOTE: this is only valid with DownwardAPIHugePages enabled
var validContainerResourceFieldPathPrefixes = sets.NewString()
var validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages = sets.NewString(hugepagesRequestsPrefixDownwardAPI, hugepagesLimitsPrefixDownwardAPI)
const hugepagesRequestsPrefixDownwardAPI string = `requests.hugepages-`
@ -2443,10 +2433,7 @@ func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path, opts PodValida
}
if ev.ValueFrom.ResourceFieldRef != nil {
numSources++
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixes
if opts.AllowDownwardAPIHugePages {
localValidContainerResourceFieldPathPrefixes = validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
}
localValidContainerResourceFieldPathPrefixes := validContainerResourceFieldPathPrefixesWithDownwardAPIHugePages
allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, &localValidContainerResourceFieldPathPrefixes, fldPath.Child("resourceFieldRef"), false)...)
}
if ev.ValueFrom.ConfigMapKeyRef != nil {
@ -2768,11 +2755,11 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str
return allErrs
}
func validatePodResourceClaims(claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
func validatePodResourceClaims(podMeta *metav1.ObjectMeta, claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
podClaimNames := sets.NewString()
for i, claim := range claims {
allErrs = append(allErrs, validatePodResourceClaim(claim, &podClaimNames, fldPath.Index(i))...)
allErrs = append(allErrs, validatePodResourceClaim(podMeta, claim, &podClaimNames, fldPath.Index(i))...)
}
return allErrs
}
@ -2790,14 +2777,22 @@ func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String {
return podClaimNames
}
func validatePodResourceClaim(claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
func validatePodResourceClaim(podMeta *metav1.ObjectMeta, claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claim.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
nameErrs := ValidateDNS1123Label(claim.Name, fldPath.Child("name"))
if len(nameErrs) > 0 {
allErrs = append(allErrs, nameErrs...)
} else if podMeta != nil && claim.Source.ResourceClaimTemplateName != nil {
claimName := podMeta.Name + "-" + claim.Name
for _, detail := range ValidateResourceClaimName(claimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), claimName, "final ResourceClaim name: "+detail))
}
}
podClaimNames.Insert(claim.Name)
}
allErrs = append(allErrs, validatePodResourceClaimSource(claim.Source, fldPath.Child("source"))...)
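The new check above guards against a subtle failure mode: the pod-level claim name is validated as a DNS label, but when ResourceClaimTemplateName is used the generated ResourceClaim is named "<pod name>-<claim name>", and that combined string must still be a valid ResourceClaim (DNS subdomain) name. A hedged standalone sketch of that final-name check using only the apimachinery validation helper (buildClaimName is an illustrative helper, not part of the Kubernetes API):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

// buildClaimName mirrors how the final ResourceClaim name is derived when a
// pod claim references a ResourceClaimTemplate: "<pod name>-<pod claim name>".
// (Illustrative helper, not part of the Kubernetes API.)
func buildClaimName(podName, podClaimName string) string {
	return podName + "-" + podClaimName
}

func main() {
	// A short pod name combined with a short claim name passes.
	fmt.Println(validation.IsDNS1123Subdomain(buildClaimName("web", "gpu"))) // []

	// A very long pod name can push the combined name past the subdomain limit,
	// which is what the validation above now reports early, at pod creation time.
	longPod := make([]byte, 250)
	for i := range longPod {
		longPod[i] = 'a'
	}
	fmt.Println(validation.IsDNS1123Subdomain(buildClaimName(string(longPod), "gpu")))
}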
@ -2813,6 +2808,16 @@ func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field
if claimSource.ResourceClaimName == nil && claimSource.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
if claimSource.ResourceClaimName != nil {
for _, detail := range ValidateResourceClaimName(*claimSource.ResourceClaimName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimName"), *claimSource.ResourceClaimName, detail))
}
}
if claimSource.ResourceClaimTemplateName != nil {
for _, detail := range ValidateResourceClaimTemplateName(*claimSource.ResourceClaimTemplateName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClaimTemplateName"), *claimSource.ResourceClaimTemplateName, detail))
}
}
return allErrs
}
@ -3036,6 +3041,37 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error
return allErrors
}
var supportedResizeResources = sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory))
var supportedResizePolicies = sets.NewString(string(core.NotRequired), string(core.RestartContainer))
func validateResizePolicy(policyList []core.ContainerResizePolicy, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
// validate that resource names are not repeated, and that only supported resource names and restart policy values are specified
resources := make(map[core.ResourceName]bool)
for i, p := range policyList {
if _, found := resources[p.ResourceName]; found {
allErrors = append(allErrors, field.Duplicate(fldPath.Index(i), p.ResourceName))
}
resources[p.ResourceName] = true
switch p.ResourceName {
case core.ResourceCPU, core.ResourceMemory:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, p.ResourceName, supportedResizeResources.List()))
}
switch p.RestartPolicy {
case core.NotRequired, core.RestartContainer:
case "":
allErrors = append(allErrors, field.Required(fldPath, ""))
default:
allErrors = append(allErrors, field.NotSupported(fldPath, p.RestartPolicy, supportedResizePolicies.List()))
}
}
return allErrors
}
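A compact standalone sketch of the duplicate/enum checks that validateResizePolicy performs, with plain string stand-ins for the core types (the type and value names below are illustrative, not the real API):

package main

import "fmt"

// Stand-in for core.ContainerResizePolicy and its enums (assumption).
type resizePolicy struct {
	ResourceName  string // "cpu" or "memory"
	RestartPolicy string // "NotRequired" or "RestartContainer"
}

var (
	supportedResources = map[string]bool{"cpu": true, "memory": true}
	supportedRestarts  = map[string]bool{"NotRequired": true, "RestartContainer": true}
)

// validateResizePolicies mirrors the structure of the validation above:
// no duplicate resource names, and both fields must be set to supported values.
func validateResizePolicies(policies []resizePolicy) []string {
	var errs []string
	seen := map[string]bool{}
	for i, p := range policies {
		if seen[p.ResourceName] {
			errs = append(errs, fmt.Sprintf("[%d]: duplicate resource %q", i, p.ResourceName))
		}
		seen[p.ResourceName] = true
		if p.ResourceName == "" {
			errs = append(errs, fmt.Sprintf("[%d]: resource name is required", i))
		} else if !supportedResources[p.ResourceName] {
			errs = append(errs, fmt.Sprintf("[%d]: unsupported resource %q", i, p.ResourceName))
		}
		if p.RestartPolicy == "" {
			errs = append(errs, fmt.Sprintf("[%d]: restart policy is required", i))
		} else if !supportedRestarts[p.RestartPolicy] {
			errs = append(errs, fmt.Sprintf("[%d]: unsupported restart policy %q", i, p.RestartPolicy))
		}
	}
	return errs
}

func main() {
	fmt.Println(validateResizePolicies([]resizePolicy{
		{ResourceName: "cpu", RestartPolicy: "NotRequired"},
		{ResourceName: "cpu", RestartPolicy: "RestartContainer"}, // duplicate resource
		{ResourceName: "storage", RestartPolicy: "Maybe"},        // both unsupported
	}))
}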
// validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers.
// Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates.
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
@ -3158,6 +3194,9 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
if ctr.StartupProbe != nil {
allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers"))
}
if len(ctr.ResizePolicy) > 0 {
allErrs = append(allErrs, field.Invalid(idxPath.Child("resizePolicy"), ctr.ResizePolicy, "must not be set for init containers"))
}
}
return allErrs
@ -3203,6 +3242,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
allErrs = append(allErrs, validateResizePolicy(ctr.ResizePolicy, path.Child("resizePolicy"))...)
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...)
return allErrs
}
@ -3382,9 +3422,7 @@ func validateSchedulingGates(schedulingGates []core.PodSchedulingGate, fldPath *
// There should be no duplicates in the list of scheduling gates.
seen := sets.String{}
for i, schedulingGate := range schedulingGates {
if schedulingGate.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), "must not be empty"))
}
allErrs = append(allErrs, ValidateQualifiedName(schedulingGate.Name, fldPath.Index(i))...)
if seen.Has(schedulingGate.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), schedulingGate.Name))
}
@ -3644,8 +3682,6 @@ func validateContainerOnlyForPod(ctr *core.Container, path *field.Path) field.Er
// PodValidationOptions contains the different settings for pod validation
type PodValidationOptions struct {
// Allow pod spec to use hugepages in downward API
AllowDownwardAPIHugePages bool
// Allow invalid pod-deletion-cost annotation value for backward compatibility.
AllowInvalidPodDeletionCost bool
// Allow invalid label-value in LabelSelector
@ -3654,6 +3690,10 @@ type PodValidationOptions struct {
AllowIndivisibleHugePagesValues bool
// Allow more DNSSearchPaths and longer DNSSearchListChars
AllowExpandedDNSConfig bool
// Allow invalid topologySpreadConstraint labelSelector for backward compatibility
AllowInvalidTopologySpreadConstraintLabelSelector bool
// Allow node selector additions for gated pods.
AllowMutableNodeSelectorAndNodeAffinity bool
}
// validatePodMetadataAndSpec tests if required fields in the pod.metadata and pod.spec are set,
@ -3746,7 +3786,7 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
allErrs = append(allErrs, vErrs...)
podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims)
allErrs = append(allErrs, validatePodResourceClaims(spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validatePodResourceClaims(podMeta, spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...)
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...)
@ -3759,7 +3799,7 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...)
allErrs = append(allErrs, validateSchedulingGates(spec.SchedulingGates, fldPath.Child("schedulingGates"))...)
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"))...)
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"), opts)...)
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath)...)
allErrs = append(allErrs, validateHostUsers(spec, fldPath)...)
if len(spec.ServiceAccountName) > 0 {
@ -4506,6 +4546,24 @@ func validateSeccompAnnotationsAndFieldsMatch(annotationValue string, seccompFie
return nil
}
var updatablePodSpecFields = []string{
"`spec.containers[*].image`",
"`spec.initContainers[*].image`",
"`spec.activeDeadlineSeconds`",
"`spec.tolerations` (only additions to existing tolerations)",
"`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)",
"`spec.containers[*].resources` (for CPU/memory only)",
}
// TODO(vinaykul,InPlacePodVerticalScaling): Drop this var once InPlacePodVerticalScaling goes GA and featuregate is gone.
var updatablePodSpecFieldsNoResources = []string{
"`spec.containers[*].image`",
"`spec.initContainers[*].image`",
"`spec.activeDeadlineSeconds`",
"`spec.tolerations` (only additions to existing tolerations)",
"`spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)",
}
// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields
// that cannot be changed.
func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
@ -4565,12 +4623,56 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
return allErrs
}
//TODO(vinaykul,InPlacePodVerticalScaling): With KEP 2527, we can rely on persistence of PodStatus.QOSClass
// We can use PodStatus.QOSClass instead of GetPodQOS here, in kubelet, and elsewhere, as PodStatus.QOSClass
// does not change once it is bootstrapped in podCreate. This needs to be addressed before beta as a
// separate PR covering all uses of GetPodQOS. With that change, we can drop the below block.
// Ref: https://github.com/kubernetes/kubernetes/pull/102884#discussion_r1093790446
// Ref: https://github.com/kubernetes/kubernetes/pull/102884/#discussion_r663280487
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// reject attempts to change pod qos
oldQoS := qos.GetPodQOS(oldPod)
newQoS := qos.GetPodQOS(newPod)
if newQoS != oldQoS {
allErrs = append(allErrs, field.Invalid(fldPath, newQoS, "Pod QoS is immutable"))
}
}
// handle updateable fields by munging those fields prior to deep equal comparison.
mungedPodSpec := *newPod.Spec.DeepCopy()
// munge spec.containers[*].image
var newContainers []core.Container
for ix, container := range mungedPodSpec.Containers {
container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone
// When the feature-gate is turned off, any new requests attempting to update CPU or memory
// resource values will result in validation failure.
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
// Resources are mutable for CPU & memory only
// - user can now modify Resources to express new desired Resources
mungeCpuMemResources := func(resourceList, oldResourceList core.ResourceList) core.ResourceList {
if oldResourceList == nil {
return nil
}
var mungedResourceList core.ResourceList
if resourceList == nil {
mungedResourceList = make(core.ResourceList)
} else {
mungedResourceList = resourceList.DeepCopy()
}
delete(mungedResourceList, core.ResourceCPU)
delete(mungedResourceList, core.ResourceMemory)
if cpu, found := oldResourceList[core.ResourceCPU]; found {
mungedResourceList[core.ResourceCPU] = cpu
}
if mem, found := oldResourceList[core.ResourceMemory]; found {
mungedResourceList[core.ResourceMemory] = mem
}
return mungedResourceList
}
lim := mungeCpuMemResources(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits)
req := mungeCpuMemResources(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests)
container.Resources = core.ResourceRequirements{Limits: lim, Requests: req}
}
newContainers = append(newContainers, container)
}
mungedPodSpec.Containers = newContainers
@ -4598,13 +4700,49 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
mungedPodSpec.TerminationGracePeriodSeconds = oldPod.Spec.TerminationGracePeriodSeconds // +k8s:verify-mutation:reason=clone
}
// Handle validations specific to gated pods.
podIsGated := len(oldPod.Spec.SchedulingGates) > 0
if opts.AllowMutableNodeSelectorAndNodeAffinity && podIsGated {
// Additions to spec.nodeSelector are allowed (no deletions or mutations) for gated pods.
if !apiequality.Semantic.DeepEqual(mungedPodSpec.NodeSelector, oldPod.Spec.NodeSelector) {
allErrs = append(allErrs, validateNodeSelectorMutation(specPath.Child("nodeSelector"), mungedPodSpec.NodeSelector, oldPod.Spec.NodeSelector)...)
mungedPodSpec.NodeSelector = oldPod.Spec.NodeSelector // +k8s:verify-mutation:reason=clone
}
// Validate node affinity mutations.
var oldNodeAffinity *core.NodeAffinity
if oldPod.Spec.Affinity != nil {
oldNodeAffinity = oldPod.Spec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
var mungedNodeAffinity *core.NodeAffinity
if mungedPodSpec.Affinity != nil {
mungedNodeAffinity = mungedPodSpec.Affinity.NodeAffinity // +k8s:verify-mutation:reason=clone
}
if !apiequality.Semantic.DeepEqual(oldNodeAffinity, mungedNodeAffinity) {
allErrs = append(allErrs, validateNodeAffinityMutation(specPath.Child("affinity").Child("nodeAffinity"), mungedNodeAffinity, oldNodeAffinity)...)
switch {
case mungedPodSpec.Affinity == nil && oldNodeAffinity == nil:
// already effectively nil, no change needed
case mungedPodSpec.Affinity == nil && oldNodeAffinity != nil:
mungedPodSpec.Affinity = &core.Affinity{NodeAffinity: oldNodeAffinity} // +k8s:verify-mutation:reason=clone
default:
mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone
}
}
}
if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) {
// This diff isn't perfect, but it's a helluva lot better than "I'm not going to tell you what the difference is".
// TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
specDiff := cmp.Diff(oldPod.Spec, mungedPodSpec)
allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds`, `spec.tolerations` (only additions to existing tolerations) or `spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)\n%v", specDiff)))
errs := field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFieldsNoResources, ","), specDiff))
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
errs = field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than %s\n%v", strings.Join(updatablePodSpecFields, ","), specDiff))
}
allErrs = append(allErrs, errs)
}
return allErrs
}
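The container-munging logic above is the heart of in-place resize validation: before the spec-wide deep-equal comparison, CPU and memory are copied back from the old spec, so only those two resources are allowed to differ when the feature gate is enabled. A standalone sketch of that masking step, using a plain map in place of core.ResourceList (mungeCPUMem and the resource keys are illustrative assumptions):

package main

import (
	"fmt"
	"reflect"
)

// mungeCPUMem mirrors mungeCpuMemResources above: starting from the new list,
// drop cpu/memory and copy the old values back in, so any cpu/memory change is
// masked out before the spec-wide deep-equal comparison.
func mungeCPUMem(newList, oldList map[string]string) map[string]string {
	if oldList == nil {
		return nil
	}
	munged := map[string]string{}
	for k, v := range newList {
		if k == "cpu" || k == "memory" {
			continue
		}
		munged[k] = v
	}
	for _, k := range []string{"cpu", "memory"} {
		if v, ok := oldList[k]; ok {
			munged[k] = v
		}
	}
	return munged
}

func main() {
	oldReq := map[string]string{"cpu": "500m", "memory": "128Mi"}
	newReq := map[string]string{"cpu": "1", "memory": "256Mi"} // a pure resize

	// After munging, the new requests look identical to the old ones,
	// so the update is not flagged as a forbidden spec change.
	fmt.Println(reflect.DeepEqual(mungeCPUMem(newReq, oldReq), oldReq)) // true

	// Adding a brand-new resource is still caught by the comparison.
	newReq["ephemeral-storage"] = "1Gi"
	fmt.Println(reflect.DeepEqual(mungeCPUMem(newReq, oldReq), oldReq)) // false
}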
@ -4687,6 +4825,11 @@ func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodVali
allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"), opts)...)
// static pods don't support ephemeral containers #113935
if _, ok := oldPod.Annotations[core.MirrorPodAnnotationKey]; ok {
return field.ErrorList{field.Forbidden(field.NewPath(""), "static pods do not support ephemeral containers")}
}
// Part 2: Validate that the changes between oldPod.Spec.EphemeralContainers and
// newPod.Spec.EphemeralContainers are allowed.
//
@ -4743,14 +4886,23 @@ var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityCli
var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort),
string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName))
var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyTypeLocal))
var supportedServiceInternalTrafficPolicy = sets.NewString(string(core.ServiceInternalTrafficPolicyCluster), string(core.ServiceExternalTrafficPolicyLocal))
var supportedServiceIPFamily = sets.NewString(string(core.IPv4Protocol), string(core.IPv6Protocol))
var supportedServiceIPFamilyPolicy = sets.NewString(string(core.IPFamilyPolicySingleStack), string(core.IPFamilyPolicyPreferDualStack), string(core.IPFamilyPolicyRequireDualStack))
// ValidateService tests if required fields/annotations of a Service are valid.
func ValidateService(service *core.Service) field.ErrorList {
allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata"))
metaPath := field.NewPath("metadata")
allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, metaPath)
topologyHintsVal, topologyHintsSet := service.Annotations[core.DeprecatedAnnotationTopologyAwareHints]
topologyModeVal, topologyModeSet := service.Annotations[core.AnnotationTopologyMode]
if topologyModeSet && topologyHintsSet && topologyModeVal != topologyHintsVal {
message := fmt.Sprintf("must match annotations[%s] when both are specified", core.DeprecatedAnnotationTopologyAwareHints)
allErrs = append(allErrs, field.Invalid(metaPath.Child("annotations").Key(core.AnnotationTopologyMode), topologyModeVal, message))
}
specPath := field.NewPath("spec")
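The annotation check above only fires when both the deprecated topology-hints annotation and the newer topology-mode annotation are present with different values. A minimal sketch of that rule, with the annotation keys written out literally (treat these key strings as illustrative constants mirroring core.DeprecatedAnnotationTopologyAwareHints and core.AnnotationTopologyMode; verify against the vendored constants):

package main

import "fmt"

// Assumed annotation keys (verify against the vendored core constants).
const (
	deprecatedHintsKey = "service.kubernetes.io/topology-aware-hints"
	topologyModeKey    = "service.kubernetes.io/topology-mode"
)

// checkTopologyAnnotations reports an error only when both annotations are set
// and disagree, which is the same condition ValidateService rejects above.
func checkTopologyAnnotations(annotations map[string]string) error {
	hints, hintsSet := annotations[deprecatedHintsKey]
	mode, modeSet := annotations[topologyModeKey]
	if hintsSet && modeSet && hints != mode {
		return fmt.Errorf("%s=%q must match %s=%q when both are specified",
			topologyModeKey, mode, deprecatedHintsKey, hints)
	}
	return nil
}

func main() {
	fmt.Println(checkTopologyAnnotations(map[string]string{deprecatedHintsKey: "Auto"}))                          // <nil>
	fmt.Println(checkTopologyAnnotations(map[string]string{deprecatedHintsKey: "Auto", topologyModeKey: "Auto"})) // <nil>
	fmt.Println(checkTopologyAnnotations(map[string]string{deprecatedHintsKey: "Auto", topologyModeKey: "Disabled"}))
}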
@ -4975,8 +5127,8 @@ func needsExternalTrafficPolicy(svc *core.Service) bool {
}
var validExternalTrafficPolicies = sets.NewString(
string(core.ServiceExternalTrafficPolicyTypeCluster),
string(core.ServiceExternalTrafficPolicyTypeLocal))
string(core.ServiceExternalTrafficPolicyCluster),
string(core.ServiceExternalTrafficPolicyLocal))
func validateServiceExternalTrafficPolicy(service *core.Service) field.ErrorList {
allErrs := field.ErrorList{}
@ -5811,7 +5963,6 @@ func ValidateSecret(secret *core.Secret) field.ErrorList {
if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists {
allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), ""))
}
// TODO: Verify that the key matches the cert.
default:
// no-op
}
@ -5946,9 +6097,9 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, podCl
if exists {
// For non-overcommitable resources, requests must not exceed limits and must not be lower either, i.e. they must be equal.
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit of %s", resourceName, limitQuantity.String())))
} else if quantity.Cmp(limitQuantity) > 0 {
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName)))
allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit of %s", resourceName, limitQuantity.String())))
}
} else if !helper.IsOvercommitAllowed(resourceName) {
allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
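The reworded errors above now include the offending limit value in the message. A hedged sketch of the underlying comparison using the real apimachinery Quantity type (isOvercommitAllowed below is a simplified stand-in for helper.IsOvercommitAllowed, and the error wording is only illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// isOvercommitAllowed is a simplified stand-in: hugepages and some extended
// resources are not overcommittable, so requests must equal limits for them.
func isOvercommitAllowed(name string) bool {
	return name == "cpu" || name == "memory"
}

// checkRequestAgainstLimit reproduces the request/limit rule from
// ValidateResourceRequirements for a single resource name.
func checkRequestAgainstLimit(name string, request, limit resource.Quantity) error {
	if request.Cmp(limit) != 0 && !isOvercommitAllowed(name) {
		return fmt.Errorf("%s request %s must be equal to %s limit of %s",
			name, request.String(), name, limit.String())
	}
	if request.Cmp(limit) > 0 {
		return fmt.Errorf("%s request %s must be less than or equal to %s limit of %s",
			name, request.String(), name, limit.String())
	}
	return nil
}

func main() {
	fmt.Println(checkRequestAgainstLimit("cpu",
		resource.MustParse("250m"), resource.MustParse("500m"))) // <nil>: overcommit allowed
	fmt.Println(checkRequestAgainstLimit("cpu",
		resource.MustParse("1"), resource.MustParse("500m"))) // request exceeds limit
	fmt.Println(checkRequestAgainstLimit("hugepages-2Mi",
		resource.MustParse("2Mi"), resource.MustParse("4Mi"))) // must equal limit
}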
@ -6741,7 +6892,7 @@ var (
)
// validateTopologySpreadConstraints validates given TopologySpreadConstraints.
func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path) field.ErrorList {
func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstraint, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
for i, constraint := range constraints {
@ -6767,6 +6918,9 @@ func validateTopologySpreadConstraints(constraints []core.TopologySpreadConstrai
allErrs = append(allErrs, err)
}
allErrs = append(allErrs, validateMatchLabelKeys(subFldPath.Child("matchLabelKeys"), constraint.MatchLabelKeys, constraint.LabelSelector)...)
if !opts.AllowInvalidTopologySpreadConstraintLabelSelector {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(constraint.LabelSelector, unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, subFldPath.Child("labelSelector"))...)
}
}
return allErrs
@ -6846,7 +7000,9 @@ func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelS
return nil
}
var allErrs field.ErrorList
labelSelectorKeys := sets.String{}
if labelSelector != nil {
for key := range labelSelector.MatchLabels {
labelSelectorKeys.Insert(key)
@ -6854,9 +7010,10 @@ func validateMatchLabelKeys(fldPath *field.Path, matchLabelKeys []string, labelS
for _, matchExpression := range labelSelector.MatchExpressions {
labelSelectorKeys.Insert(matchExpression.Key)
}
} else {
allErrs = append(allErrs, field.Forbidden(fldPath, "must not be specified when labelSelector is not set"))
}
allErrs := field.ErrorList{}
for i, key := range matchLabelKeys {
allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(key, fldPath.Index(i))...)
if labelSelectorKeys.Has(key) {
@ -7172,3 +7329,115 @@ func ValidatePodAffinityTermSelector(podAffinityTerm core.PodAffinityTerm, allow
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, labelSelectorValidationOptions, fldPath.Child("namespaceSelector"))...)
return allErrs
}
var betaToGALabel = map[string]string{
v1.LabelFailureDomainBetaZone: v1.LabelTopologyZone,
v1.LabelFailureDomainBetaRegion: v1.LabelTopologyRegion,
kubeletapis.LabelOS: v1.LabelOSStable,
kubeletapis.LabelArch: v1.LabelArchStable,
v1.LabelInstanceType: v1.LabelInstanceTypeStable,
}
var (
maskNodeSelectorLabelChangeEqualities conversion.Equalities
initMaskNodeSelectorLabelChangeEqualities sync.Once
)
func getMaskNodeSelectorLabelChangeEqualities() conversion.Equalities {
initMaskNodeSelectorLabelChangeEqualities.Do(func() {
var eqs = apiequality.Semantic.Copy()
err := eqs.AddFunc(
func(newReq, oldReq core.NodeSelectorRequirement) bool {
// allow newReq to change to a GA key
if oldReq.Key != newReq.Key && betaToGALabel[oldReq.Key] == newReq.Key {
oldReq.Key = newReq.Key // +k8s:verify-mutation:reason=clone
}
return apiequality.Semantic.DeepEqual(newReq, oldReq)
},
)
if err != nil {
panic(fmt.Errorf("failed to instantiate semantic equalities: %w", err))
}
maskNodeSelectorLabelChangeEqualities = eqs
})
return maskNodeSelectorLabelChangeEqualities
}
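The equality-function trick above is easy to miss: when comparing old and new PV node affinity, a requirement whose key merely moved from a beta node label to its GA equivalent is treated as unchanged. A standalone sketch of that masking idea, with the label pairs written out as plain strings and reflect.DeepEqual standing in for the conversion.Equalities machinery (a simplified stand-in, not the actual implementation):

package main

import (
	"fmt"
	"reflect"
)

// betaToGA maps deprecated beta node labels to their GA replacements
// (same pairs as the betaToGALabel table above).
var betaToGA = map[string]string{
	"failure-domain.beta.kubernetes.io/zone":   "topology.kubernetes.io/zone",
	"failure-domain.beta.kubernetes.io/region": "topology.kubernetes.io/region",
	"beta.kubernetes.io/os":                    "kubernetes.io/os",
	"beta.kubernetes.io/arch":                  "kubernetes.io/arch",
	"beta.kubernetes.io/instance-type":         "node.kubernetes.io/instance-type",
}

// requirement is a stand-in for core.NodeSelectorRequirement.
type requirement struct {
	Key      string
	Operator string
	Values   []string
}

// equalIgnoringBetaToGA treats a key change from a beta label to its GA label
// as a no-op, then falls back to ordinary deep equality, mirroring the custom
// equality registered in getMaskNodeSelectorLabelChangeEqualities.
func equalIgnoringBetaToGA(newReq, oldReq requirement) bool {
	if oldReq.Key != newReq.Key && betaToGA[oldReq.Key] == newReq.Key {
		oldReq.Key = newReq.Key
	}
	return reflect.DeepEqual(newReq, oldReq)
}

func main() {
	oldReq := requirement{Key: "failure-domain.beta.kubernetes.io/zone", Operator: "In", Values: []string{"us-east-1a"}}
	newReq := requirement{Key: "topology.kubernetes.io/zone", Operator: "In", Values: []string{"us-east-1a"}}
	fmt.Println(equalIgnoringBetaToGA(newReq, oldReq)) // true: beta -> GA rename only

	newReq.Values = []string{"us-east-1b"}
	fmt.Println(equalIgnoringBetaToGA(newReq, oldReq)) // false: the values actually changed
}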
func validatePvNodeAffinity(newPvNodeAffinity, oldPvNodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if !getMaskNodeSelectorLabelChangeEqualities().DeepEqual(newPvNodeAffinity, oldPvNodeAffinity) {
allErrs = append(allErrs, field.Invalid(fldPath, newPvNodeAffinity, fieldImmutableErrorMsg+", except for updating from beta label to GA"))
}
return allErrs
}
func validateNodeSelectorMutation(fldPath *field.Path, newNodeSelector, oldNodeSelector map[string]string) field.ErrorList {
var allErrs field.ErrorList
// Validate no existing node selectors were deleted or mutated.
for k, v1 := range oldNodeSelector {
if v2, ok := newNodeSelector[k]; !ok || v1 != v2 {
allErrs = append(allErrs, field.Invalid(fldPath, newNodeSelector, "only additions to spec.nodeSelector are allowed (no mutations or deletions)"))
return allErrs
}
}
return allErrs
}
func validateNodeAffinityMutation(nodeAffinityPath *field.Path, newNodeAffinity, oldNodeAffinity *core.NodeAffinity) field.ErrorList {
var allErrs field.ErrorList
// If old node affinity was nil, anything can be set.
if oldNodeAffinity == nil || oldNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
return allErrs
}
oldTerms := oldNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
var newTerms []core.NodeSelectorTerm
if newNodeAffinity != nil && newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
newTerms = newNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
}
// If there are no old terms, we can set the new terms to anything.
// If there are old terms, we cannot add any new ones.
if len(oldTerms) > 0 && len(oldTerms) != len(newTerms) {
return append(allErrs, field.Invalid(nodeAffinityPath.Child("requiredDuringSchedulingIgnoredDuringExecution").Child("nodeSelectorTerms"), newTerms, "no additions/deletions to non-empty NodeSelectorTerms list are allowed"))
}
// For requiredDuringSchedulingIgnoredDuringExecution, if old NodeSelectorTerms
// was empty, anything can be set. If non-empty, only additions of NodeSelectorRequirements
// to matchExpressions or matchFields are allowed.
for i := range oldTerms {
if !validateNodeSelectorTermHasOnlyAdditions(newTerms[i], oldTerms[i]) {
allErrs = append(allErrs, field.Invalid(nodeAffinityPath.Child("requiredDuringSchedulingIgnoredDuringExecution").Child("nodeSelectorTerms").Index(i), newTerms[i], "only additions are allowed (no mutations or deletions)"))
}
}
return allErrs
}
func validateNodeSelectorTermHasOnlyAdditions(newTerm, oldTerm core.NodeSelectorTerm) bool {
if len(oldTerm.MatchExpressions) == 0 && len(oldTerm.MatchFields) == 0 {
if len(newTerm.MatchExpressions) > 0 || len(newTerm.MatchFields) > 0 {
return false
}
}
// Validate MatchExpressions only has additions (no deletions or mutations)
if l := len(oldTerm.MatchExpressions); l > 0 {
if len(newTerm.MatchExpressions) < l {
return false
}
if !apiequality.Semantic.DeepEqual(newTerm.MatchExpressions[:l], oldTerm.MatchExpressions) {
return false
}
}
// Validate MatchFields only has additions (no deletions or mutations)
if l := len(oldTerm.MatchFields); l > 0 {
if len(newTerm.MatchFields) < l {
return false
}
if !apiequality.Semantic.DeepEqual(newTerm.MatchFields[:l], oldTerm.MatchFields) {
return false
}
}
return true
}
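The helper above encodes the append-only rule for gated-pod node affinity: an existing term may grow, but its existing match expressions and match fields must stay identical and in order (the separate empty-term special case is handled explicitly above and not reproduced here). A tiny standalone sketch of that prefix check, with reflect.DeepEqual standing in for apiequality.Semantic.DeepEqual and plain strings standing in for NodeSelectorRequirements (illustrative assumptions):

package main

import (
	"fmt"
	"reflect"
)

// onlyAdditions reports whether oldList is an unchanged prefix of newList,
// i.e. entries may only be appended, never mutated, reordered, or removed.
func onlyAdditions(newList, oldList []string) bool {
	if len(newList) < len(oldList) {
		return false
	}
	return reflect.DeepEqual(newList[:len(oldList)], oldList)
}

func main() {
	old := []string{"zone In us-east-1a", "arch In amd64"}

	grown := append(append([]string{}, old...), "os In linux")
	fmt.Println(onlyAdditions(grown, old))                                           // true: pure addition
	fmt.Println(onlyAdditions([]string{"zone In us-east-1b", "arch In amd64"}, old)) // false: existing entry mutated
	fmt.Println(onlyAdditions(old[:1], old))                                         // false: entry removed
}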


@ -788,6 +788,11 @@ func (in *Container) DeepCopyInto(out *Container) {
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.ResizePolicy != nil {
in, out := &in.ResizePolicy, &out.ResizePolicy
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -875,6 +880,22 @@ func (in *ContainerPort) DeepCopy() *ContainerPort {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy.
func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
if in == nil {
return nil
}
out := new(ContainerResizePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerState) DeepCopyInto(out *ContainerState) {
*out = *in
@ -967,6 +988,18 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
*out = new(bool)
**out = **in
}
if in.AllocatedResources != nil {
in, out := &in.AllocatedResources, &out.AllocatedResources
*out = make(ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = new(ResourceRequirements)
(*in).DeepCopyInto(*out)
}
return
}
@ -1382,6 +1415,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
}
}
in.Resources.DeepCopyInto(&out.Resources)
if in.ResizePolicy != nil {
in, out := &in.ResizePolicy, &out.ResizePolicy
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@ -5502,7 +5540,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
}
if in.InternalTrafficPolicy != nil {
in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy
*out = new(ServiceInternalTrafficPolicyType)
*out = new(ServiceInternalTrafficPolicy)
**out = **in
}
return


@ -22,7 +22,6 @@ import (
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/apis/policy"
)
// GroupName is the group name use in this package
@ -60,8 +59,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&networking.IngressList{},
&apps.ReplicaSet{},
&apps.ReplicaSetList{},
&policy.PodSecurityPolicy{},
&policy.PodSecurityPolicyList{},
&autoscaling.Scale{},
&networking.NetworkPolicy{},
&networking.NetworkPolicyList{},


@ -54,6 +54,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&IngressClassList{},
&ClusterCIDR{},
&ClusterCIDRList{},
&IPAddress{},
&IPAddressList{},
)
return nil
}


@ -18,23 +18,25 @@ package networking
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy describes what network traffic is allowed for a set of Pods
// NetworkPolicy describes what network traffic is allowed for a set of pods
type NetworkPolicy struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Specification of the desired behavior for this NetworkPolicy.
// spec represents the specification of the desired behavior for this NetworkPolicy.
// +optional
Spec NetworkPolicySpec
// Status is the current state of the NetworkPolicy.
// status represents the current state of the NetworkPolicy.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status NetworkPolicyStatus
@ -53,16 +55,16 @@ const (
// NetworkPolicySpec provides the specification of a NetworkPolicy
type NetworkPolicySpec struct {
// Selects the pods to which this NetworkPolicy object applies. The array of
// ingress rules is applied to any pods selected by this field. Multiple network
// policies can select the same set of pods. In this case, the ingress rules for
// each are combined additively. This field is NOT optional and follows standard
// label selector semantics. An empty podSelector matches all pods in this
// namespace.
// podSelector selects the pods to which this NetworkPolicy object applies.
// The array of ingress rules is applied to any pods selected by this field.
// Multiple network policies can select the same set of pods. In this case,
// the ingress rules for each are combined additively.
// This field is NOT optional and follows standard label selector semantics.
// An empty podSelector matches all pods in this namespace.
PodSelector metav1.LabelSelector
// List of ingress rules to be applied to the selected pods. Traffic is allowed to
// a pod if there are no NetworkPolicies selecting the pod
// ingress is a list of ingress rules to be applied to the selected pods.
// Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
// (and cluster policy otherwise allows the traffic), OR if the traffic source is
// the pod's local node, OR if the traffic matches at least one ingress rule
// across all of the NetworkPolicy objects whose podSelector matches the pod. If
@ -71,8 +73,8 @@ type NetworkPolicySpec struct {
// +optional
Ingress []NetworkPolicyIngressRule
// List of egress rules to be applied to the selected pods. Outgoing traffic is
// allowed if there are no NetworkPolicies selecting the pod (and cluster policy
// egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
// is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
// otherwise allows the traffic), OR if the traffic matches at least one egress rule
// across all of the NetworkPolicy objects whose podSelector matches the pod. If
// this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
@ -81,15 +83,15 @@ type NetworkPolicySpec struct {
// +optional
Egress []NetworkPolicyEgressRule
// List of rule types that the NetworkPolicy relates to.
// policyTypes is a list of rule types that the NetworkPolicy relates to.
// Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
// If this field is not specified, it will default based on the existence of Ingress or Egress rules;
// policies that contain an Egress section are assumed to affect Egress, and all policies
// (whether or not they contain an Ingress section) are assumed to affect Ingress.
// If this field is not specified, it will default based on the existence of ingress or egress rules;
// policies that contain an egress section are assumed to affect egress, and all policies
// (whether or not they contain an ingress section) are assumed to affect ingress.
// If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
// Likewise, if you want to write a policy that specifies that no egress is allowed,
// you must specify a policyTypes value that include "Egress" (since such a policy would not include
// an Egress section and would otherwise default to just [ "Ingress" ]).
// an egress section and would otherwise default to just [ "Ingress" ]).
// This field is beta-level in 1.8
// +optional
PolicyTypes []PolicyType
@ -98,15 +100,15 @@ type NetworkPolicySpec struct {
// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
type NetworkPolicyIngressRule struct {
// List of ports which should be made accessible on the pods selected for this
// rule. Each item in this list is combined using a logical OR. If this field is
// ports is a list of ports which should be made accessible on the pods selected for
// this rule. Each item in this list is combined using a logical OR. If this field is
// empty or missing, this rule matches all ports (traffic not restricted by port).
// If this field is present and contains at least one item, then this rule allows
// traffic only if the traffic matches at least one port in the list.
// +optional
Ports []NetworkPolicyPort
// List of sources which should be able to access the pods selected for this rule.
// from is a list of sources which should be able to access the pods selected for this rule.
// Items in this list are combined using a logical OR operation. If this field is
// empty or missing, this rule matches all sources (traffic not restricted by
// source). If this field is present and contains at least one item, this rule
@ -119,7 +121,7 @@ type NetworkPolicyIngressRule struct {
// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
// This type is beta-level in 1.8
type NetworkPolicyEgressRule struct {
// List of destination ports for outgoing traffic.
// ports is a list of destination ports for outgoing traffic.
// Each item in this list is combined using a logical OR. If this field is
// empty or missing, this rule matches all ports (traffic not restricted by port).
// If this field is present and contains at least one item, then this rule allows
@ -127,7 +129,7 @@ type NetworkPolicyEgressRule struct {
// +optional
Ports []NetworkPolicyPort
// List of destinations for outgoing traffic of pods selected for this rule.
// to is a list of destinations for outgoing traffic of pods selected for this rule.
// Items in this list are combined using a logical OR operation. If this field is
// empty or missing, this rule matches all destinations (traffic not restricted by
// destination). If this field is present and contains at least one item, this rule
@ -138,19 +140,19 @@ type NetworkPolicyEgressRule struct {
// NetworkPolicyPort describes a port to allow traffic on
type NetworkPolicyPort struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
// If not specified, this field defaults to TCP.
// +optional
Protocol *api.Protocol
// The port on the given protocol. This can either be a numerical or named
// port represents the port on the given protocol. This can either be a numerical or named
// port on a pod. If this field is not provided, this matches all port names and
// numbers.
// If present, only traffic on the specified protocol AND port will be matched.
// +optional
Port *intstr.IntOrString
// If set, indicates that the range of ports from port to endPort, inclusive,
// endPort indicates that, if set, the range of ports from port to endPort, inclusive,
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal or greater than port.
@ -162,37 +164,38 @@ type NetworkPolicyPort struct {
// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
// that should not be included within this rule.
type IPBlock struct {
// CIDR is a string representing the IP Block
// cidr is a string representing the IPBlock
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
CIDR string
// Except is a slice of CIDRs that should not be included within an IP Block
// except is a list of CIDRs that should not be included within the IPBlock
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
// Except values will be rejected if they are outside the CIDR range
// Except values will be rejected if they are outside the cidr range
// +optional
Except []string
}
// NetworkPolicyPeer describes a peer to allow traffic to/from.
type NetworkPolicyPeer struct {
// This is a label selector which selects Pods. This field follows standard label
// podSelector is a label selector which selects pods. This field follows standard label
// selector semantics; if present but empty, it selects all pods.
//
// If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
// Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
// If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
// the pods matching podSelector in the Namespaces selected by namespaceSelector.
// Otherwise it selects the pods matching podSelector in the policy's own namespace.
// +optional
PodSelector *metav1.LabelSelector
// Selects Namespaces using cluster-scoped labels. This field follows standard label
// selector semantics; if present but empty, it selects all namespaces.
// namespaceSelector selects namespaces using cluster-scoped labels. This field follows
// standard label selector semantics; if present but empty, it selects all namespaces.
//
// If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
// Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
// If podSelector is also set, then the NetworkPolicyPeer as a whole selects
// the pods matching podSelector in the namespaces selected by namespaceSelector.
// Otherwise it selects all pods in the namespaces selected by namespaceSelector.
// +optional
NamespaceSelector *metav1.LabelSelector
// IPBlock defines policy on a particular IPBlock. If this field is set then
// ipBlock defines policy on a particular IPBlock. If this field is set then
// neither of the other fields can be.
// +optional
IPBlock *IPBlock
@ -228,9 +231,9 @@ const (
NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported"
)
// NetworkPolicyStatus describe the current state of the NetworkPolicy.
// NetworkPolicyStatus describes the current state of the NetworkPolicy.
type NetworkPolicyStatus struct {
// Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
// conditions holds an array of metav1.Condition that describes the state of the NetworkPolicy.
Conditions []metav1.Condition
}
@ -239,6 +242,7 @@ type NetworkPolicyStatus struct {
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
@ -253,17 +257,18 @@ type NetworkPolicyList struct {
// based virtual hosting etc.
type Ingress struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta
// Spec is the desired state of the Ingress.
// spec is the desired state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec IngressSpec
// Status is the current state of the Ingress.
// status is the current state of the Ingress.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status IngressStatus
@ -274,18 +279,19 @@ type Ingress struct {
// IngressList is a collection of Ingress.
type IngressList struct {
metav1.TypeMeta
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta
// Items is the list of Ingress.
// items is the list of Ingress.
Items []Ingress
}
// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpec struct {
// IngressClassName is the name of the IngressClass cluster resource. The
// ingressClassName is the name of the IngressClass cluster resource. The
// associated IngressClass defines which controller will implement the
// resource. This replaces the deprecated `kubernetes.io/ingress.class`
// annotation. For backwards compatibility, when that annotation is set, it
@ -298,23 +304,23 @@ type IngressSpec struct {
// +optional
IngressClassName *string
// DefaultBackend is the backend that should handle requests that don't
// defaultBackend is the backend that should handle requests that don't
// match any rule. If Rules are not specified, DefaultBackend must be specified.
// If DefaultBackend is not set, the handling of requests that do not match any
// of the rules will be up to the Ingress controller.
// +optional
DefaultBackend *IngressBackend
// TLS configuration. Currently the Ingress only supports a single TLS
// port, 443. If multiple members of this list specify different hosts, they
// will be multiplexed on the same port according to the hostname specified
// tls represents the TLS configuration. Currently the ingress only supports a
// single TLS port, 443. If multiple members of this list specify different hosts,
// they will be multiplexed on the same port according to the hostname specified
// through the SNI TLS extension, if the ingress controller fulfilling the
// ingress supports SNI.
// +listType=atomic
// +optional
TLS []IngressTLS
// A list of host rules used to configure the Ingress. If unspecified, or
// rules is a list of host rules used to configure the Ingress. If unspecified, or
// no rule matches, all traffic is sent to the default backend.
// +listType=atomic
// +optional
@ -330,9 +336,10 @@ type IngressSpec struct {
// resources without a class specified will be assigned this default class.
type IngressClass struct {
metav1.TypeMeta
metav1.ObjectMeta
// Spec is the desired state of the IngressClass.
// spec is the desired state of the IngressClass.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec IngressClassSpec
@ -340,15 +347,15 @@ type IngressClass struct {
// IngressClassSpec provides information about the class of an Ingress.
type IngressClassSpec struct {
// Controller refers to the name of the controller that should handle this
// controller refers to the name of the controller that should handle this
// class. This allows for different "flavors" that are controlled by the
// same controller. For example, you may have different Parameters for the
// same controller. For example, you may have different parameters for the
// same implementing controller. This should be specified as a
// domain-prefixed path no more than 250 characters in length, e.g.
// "acme.io/ingress-controller". This field is immutable.
Controller string
// Parameters is a link to a custom resource containing additional
// parameters is a link to a custom resource containing additional
// configuration for the controller. This is optional if the controller does
// not require extra parameters.
// +optional
@ -367,20 +374,24 @@ const (
// IngressClassParametersReference identifies an API object. This can be used
// to specify a cluster or namespace-scoped resource.
type IngressClassParametersReference struct {
// APIGroup is the group for the resource being referenced. If APIGroup is
// not specified, the specified Kind must be in the core API group. For any
// other third-party types, APIGroup is required.
// apiGroup is the group for the resource being referenced. If apiGroup is
// not specified, the specified kind must be in the core API group. For any
// other third-party types, apiGroup is required.
// +optional
APIGroup *string
// Kind is the type of resource being referenced.
// kind is the type of resource being referenced.
Kind string
// Name is the name of resource being referenced.
// name is the name of resource being referenced.
Name string
// Scope represents if this refers to a cluster or namespace scoped resource.
// scope represents if this refers to a cluster or namespace scoped resource.
// This may be set to "Cluster" (default) or "Namespace".
// +optional
Scope *string
// Namespace is the namespace of the resource being referenced. This field is
// namespace is the namespace of the resource being referenced. This field is
// required when scope is set to "Namespace" and must be unset when scope is set to
// "Cluster".
// +optional
@ -392,71 +403,73 @@ type IngressClassParametersReference struct {
// IngressClassList is a collection of IngressClasses.
type IngressClassList struct {
metav1.TypeMeta
// Standard object's metadata.
// +optional
metav1.ListMeta
// Items is the list of IngressClasses.
// items is the list of IngressClasses.
Items []IngressClass
}
// IngressTLS describes the transport layer security associated with an Ingress.
// IngressTLS describes the transport layer security associated with an ingress.
type IngressTLS struct {
// Hosts are a list of hosts included in the TLS certificate. The values in
// hosts is a list of hosts included in the TLS certificate. The values in
// this list must match the name/s used in the tlsSecret. Defaults to the
// wildcard host setting for the loadbalancer controller fulfilling this
// Ingress, if left unspecified.
// +listType=atomic
// +optional
Hosts []string
// SecretName is the name of the secret used to terminate TLS traffic on
// secretName is the name of the secret used to terminate TLS traffic on
// port 443. Field is left optional to allow TLS routing based on SNI
// hostname alone. If the SNI host in a listener conflicts with the "Host"
// header field used by an IngressRule, the SNI host is used for termination
// and value of the Host header is used for routing.
// and value of the "Host" header is used for routing.
// +optional
SecretName string
// TODO: Consider specifying different modes of termination, protocols etc.
}
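// Editor's sketch (not part of the vendored file): an IngressTLS entry terminating
// TLS for a single host on port 443, using only the fields defined above. The host,
// the secret name, and the networking import path are assumptions.
package example

import networking "k8s.io/kubernetes/pkg/apis/networking"

// exampleTLS pairs a certificate secret with the host it covers; entries for
// different hosts are multiplexed on port 443 via SNI, as noted in IngressSpec.
func exampleTLS() networking.IngressTLS {
	return networking.IngressTLS{
		Hosts:      []string{"shop.example.com"},
		SecretName: "shop-example-com-tls",
	}
}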
// IngressStatus describe the current state of the Ingress.
// IngressStatus describes the current state of the Ingress.
type IngressStatus struct {
// LoadBalancer contains the current status of the load-balancer.
// loadBalancer contains the current status of the load-balancer.
// +optional
LoadBalancer IngressLoadBalancerStatus
}
// IngressLoadBalancerStatus represents the status of a load-balancer
type IngressLoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer.
// ingress is a list containing ingress points for the load-balancer.
// +optional
Ingress []IngressLoadBalancerIngress
}
// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based.
// ip is set for load-balancer ingress points that are IP based.
// +optional
IP string
// Hostname is set for load-balancer ingress points that are DNS based.
// hostname is set for load-balancer ingress points that are DNS based.
// +optional
Hostname string
// Ports provides information about the ports exposed by this LoadBalancer.
// ports provides information about the ports exposed by this LoadBalancer.
// +optional
Ports []IngressPortStatus
}
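// Editor's sketch (not part of the vendored file): reading the ingress points
// published in IngressStatus, preferring the hostname for DNS-based load balancers
// and falling back to the IP otherwise. The networking import path is an assumption.
package example

import networking "k8s.io/kubernetes/pkg/apis/networking"

// ingressEndpoints flattens status.loadBalancer.ingress into printable endpoints.
func ingressEndpoints(status networking.IngressStatus) []string {
	var endpoints []string
	for _, ing := range status.LoadBalancer.Ingress {
		if ing.Hostname != "" {
			endpoints = append(endpoints, ing.Hostname)
			continue
		}
		endpoints = append(endpoints, ing.IP)
	}
	return endpoints
}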
// IngressPortStatus represents the error condition of an ingress port
type IngressPortStatus struct {
// Port is the port number of the ingress port.
// port is the port number of the ingress port.
Port int32
// Protocol is the protocol of the ingress port.
// protocol is the protocol of the ingress port.
Protocol api.Protocol
// Error indicates a problem on this port.
// error indicates a problem on this port.
// The format of the error must comply with the following rules:
// - Kubernetes-defined error values use CamelCase names
// - Provider-specific error values must follow label-name style (e.g.
@ -469,7 +482,7 @@ type IngressPortStatus struct {
// host match, then routed to the backend associated with the matching
// IngressRuleValue.
type IngressRule struct {
// Host is the fully qualified domain name of a network host, as defined by RFC 3986.
// host is the fully qualified domain name of a network host, as defined by RFC 3986.
// Note the following deviations from the "host" part of the
// URI as defined in RFC 3986:
// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
@ -482,17 +495,18 @@ type IngressRule struct {
// IngressRuleValue. If the host is unspecified, the Ingress routes all
// traffic based on the specified IngressRuleValue.
//
// Host can be "precise" which is a domain name without the terminating dot of
// host can be "precise" which is a domain name without the terminating dot of
// a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
// prefixed with a single wildcard label (e.g. "*.foo.com").
// The wildcard character '*' must appear by itself as the first DNS label and
// matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
// Requests will be matched against the Host field in the following way:
// 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
// 2. If Host is a wildcard, then the request matches this rule if the http host header
// Requests will be matched against the host field in the following way:
// 1. If host is precise, the request matches this rule if the http host header is equal to Host.
// 2. If host is a wildcard, then the request matches this rule if the http host header
// is equal to the suffix (removing the first label) of the wildcard rule.
// +optional
Host string
// IngressRuleValue represents a rule to route requests for this
// IngressRule. If unspecified, the rule defaults to a http catch-all.
// Whether that sends just traffic matching the host to the default backend
@ -524,7 +538,7 @@ type IngressRuleValue struct {
// to match against everything after the last '/' and before the first '?'
// or '#'.
type HTTPIngressRuleValue struct {
// A collection of paths that map requests to backends.
// paths is a collection of paths that map requests to backends.
// +listType=atomic
Paths []HTTPIngressPath
// TODO: Consider adding fields for ingress-type specific global
@ -564,32 +578,32 @@ const (
// HTTPIngressPath associates a path with a backend. Incoming urls matching the
// path are forwarded to the backend.
type HTTPIngressPath struct {
// Path is matched against the path of an incoming request. Currently it can
// path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
// as defined by RFC 3986. Paths must begin with a '/' and must be present
// when using PathType with value "Exact" or "Prefix".
// +optional
Path string
// PathType determines the interpretation of the Path matching. PathType can
// pathType determines the interpretation of the path matching. PathType can
// be one of Exact, Prefix, or ImplementationSpecific. Implementations are
// required to support all path types.
// +optional
PathType *PathType
// Backend defines the referenced service endpoint to which the traffic
// backend defines the referenced service endpoint to which the traffic
// will be forwarded to.
Backend IngressBackend
}
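// Editor's sketch (not part of the vendored file): an HTTPIngressPath routing
// everything under /api to a service backend. The service name, the port number,
// the PathTypePrefix constant (declared in the const block elided from this hunk),
// and the networking import path are assumptions.
package example

import networking "k8s.io/kubernetes/pkg/apis/networking"

func apiPath() networking.HTTPIngressPath {
	prefix := networking.PathTypePrefix
	return networking.HTTPIngressPath{
		Path:     "/api",
		PathType: &prefix,
		Backend: networking.IngressBackend{
			Service: &networking.IngressServiceBackend{
				Name: "api",
				Port: networking.ServiceBackendPort{Number: 8080},
			},
		},
	}
}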
// IngressBackend describes all endpoints for a given service and port.
type IngressBackend struct {
// Service references a Service as a Backend.
// service references a service as a backend.
// This is a mutually exclusive setting with "Resource".
// +optional
Service *IngressServiceBackend
// Resource is an ObjectRef to another Kubernetes resource in the namespace
// resource is an ObjectRef to another Kubernetes resource in the namespace
// of the Ingress object. If resource is specified, a service.Name and
// service.Port must not be specified.
// This is a mutually exclusive setting with "Service".
@ -599,24 +613,24 @@ type IngressBackend struct {
// IngressServiceBackend references a Kubernetes Service as a Backend.
type IngressServiceBackend struct {
// Name is the referenced service. The service must exist in
// name is the referenced service. The service must exist in
// the same namespace as the Ingress object.
Name string
// Port of the referenced service. A port name or port number
// port of the referenced service. A port name or port number
// is required for an IngressServiceBackend.
Port ServiceBackendPort
}
// ServiceBackendPort is the service port being referenced.
type ServiceBackendPort struct {
// Name is the name of the port on the Service.
// name is the name of the port on the Service.
// This must be an IANA_SVC_NAME (following RFC6335).
// This is a mutually exclusive setting with "Number".
// +optional
Name string
// Number is the numerical port number (e.g. 80) on the Service.
// number is the numerical port number (e.g. 80) on the Service.
// This is a mutually exclusive setting with "Name".
// +optional
Number int32
@ -637,6 +651,7 @@ type ServiceBackendPort struct {
// selector matches the Node may be used.
type ClusterCIDR struct {
metav1.TypeMeta
metav1.ObjectMeta
Spec ClusterCIDRSpec
@ -644,13 +659,13 @@ type ClusterCIDR struct {
// ClusterCIDRSpec defines the desired state of ClusterCIDR.
type ClusterCIDRSpec struct {
// NodeSelector defines which nodes the config is applicable to.
// An empty or nil NodeSelector selects all nodes.
// nodeSelector defines which nodes the config is applicable to.
// An empty or nil nodeSelector selects all nodes.
// This field is immutable.
// +optional
NodeSelector *api.NodeSelector
// PerNodeHostBits defines the number of host bits to be configured per node.
// perNodeHostBits defines the number of host bits to be configured per node.
// A subnet mask determines how much of the address is used for network bits
// and host bits. For example, an IPv4 address of 192.168.0.0/24 splits the
// address into 24 bits for the network portion and 8 bits for the host portion.
@ -660,14 +675,14 @@ type ClusterCIDRSpec struct {
// +required
PerNodeHostBits int32
// IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
// At least one of IPv4 and IPv6 must be specified.
// ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8").
// At least one of ipv4 and ipv6 must be specified.
// This field is immutable.
// +optional
IPv4 string
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
// At least one of IPv4 and IPv6 must be specified.
// ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
// At least one of ipv4 and ipv6 must be specified.
// This field is immutable.
// +optional
IPv6 string
@ -682,6 +697,58 @@ type ClusterCIDRList struct {
// +optional
metav1.ListMeta
// Items is the list of ClusterCIDRs.
// items is the list of ClusterCIDRs.
Items []ClusterCIDR
}
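// Editor's sketch (not part of the vendored file): a dual-stack ClusterCIDRSpec as
// described above. With perNodeHostBits=8 each node is allocated 2^8 addresses per
// family; a nil nodeSelector applies to all nodes. The CIDR values and the
// networking import path are assumptions.
package example

import networking "k8s.io/kubernetes/pkg/apis/networking"

func dualStackClusterCIDR() networking.ClusterCIDRSpec {
	return networking.ClusterCIDRSpec{
		NodeSelector:    nil, // empty or nil selects all nodes
		PerNodeHostBits: 8,
		IPv4:            "10.0.0.0/8",
		IPv6:            "2001:db8::/64",
	}
}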
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
// An IP address can be represented in different formats; to guarantee its uniqueness,
// the name of the object is the IP address in canonical format: four decimal octets separated
// by dots with leading zeros suppressed for IPv4, and the representation defined by RFC 5952 for IPv6.
// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
type IPAddress struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// +optional
Spec IPAddressSpec
}
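// Editor's sketch (not part of the vendored file): deriving the canonical object
// name required by the comment above using only the standard library. netip rejects
// IPv4 addresses with leading zeros and prints IPv6 in the RFC 5952 form, matching
// the valid/invalid examples given for IPAddress names.
package example

import "net/netip"

// canonicalIPAddressName returns the canonical text form used as an IPAddress name.
func canonicalIPAddressName(raw string) (string, error) {
	addr, err := netip.ParseAddr(raw)
	if err != nil {
		return "", err // e.g. "10.01.2.3" is rejected outright
	}
	return addr.String(), nil // e.g. "2001:db8:0:0:0::1" becomes "2001:db8::1"
}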
// IPAddressSpec describes the attributes of an IPAddress.
type IPAddressSpec struct {
// ParentRef references the resource that an IPAddress is attached to.
// An IPAddress must reference a parent object.
// +required
ParentRef *ParentReference
}
// ParentReference describes a reference to the parent object an IPAddress is attached to.
type ParentReference struct {
// Group is the group of the object being referenced.
Group string
// Resource is the resource of the object being referenced.
Resource string
// Namespace is the namespace of the object being referenced.
Namespace string
// Name is the name of the object being referenced.
Name string
// UID is the uid of the object being referenced.
// +optional
UID types.UID
}
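// Editor's sketch (not part of the vendored file): an IPAddressSpec whose parentRef
// points at a core v1 Service, matching the stated use of this API by the Service
// allocator. The namespace/name values and the networking import path are assumptions.
package example

import networking "k8s.io/kubernetes/pkg/apis/networking"

func serviceParentRef() networking.IPAddressSpec {
	return networking.IPAddressSpec{
		ParentRef: &networking.ParentReference{
			Group:     "", // core API group
			Resource:  "services",
			Namespace: "default",
			Name:      "kubernetes",
		},
	}
}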
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IPAddressList contains a list of IPAddress.
type IPAddressList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
// Items is the list of IPAddress
Items []IPAddress
}

View File

@ -154,6 +154,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddress) DeepCopyInto(out *IPAddress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
func (in *IPAddress) DeepCopy() *IPAddress {
if in == nil {
return nil
}
out := new(IPAddress)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IPAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
func (in *IPAddressList) DeepCopy() *IPAddressList {
if in == nil {
return nil
}
out := new(IPAddressList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
*out = *in
if in.ParentRef != nil {
in, out := &in.ParentRef, &out.ParentRef
*out = new(ParentReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
if in == nil {
return nil
}
out := new(IPAddressSpec)
in.DeepCopyInto(out)
return out
}
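// Editor's sketch (not part of the vendored file): the generated DeepCopy above
// allocates a fresh ParentReference, so the copy never aliases the original's
// pointer. The networking import path is an assumption.
package example

import networking "k8s.io/kubernetes/pkg/apis/networking"

// parentRefIsCopied reports whether DeepCopy produced a distinct ParentReference.
func parentRefIsCopied(spec *networking.IPAddressSpec) bool {
	cp := spec.DeepCopy()
	return spec.ParentRef == nil || spec.ParentRef != cp.ParentRef
}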
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
@ -816,6 +897,22 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParentReference) DeepCopyInto(out *ParentReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
func (in *ParentReference) DeepCopy() *ParentReference {
if in == nil {
return nil
}
out := new(ParentReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in

View File

@ -1,11 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
# approval on api packages bubbles to api-approvers
reviewers:
- sig-apps-api-reviewers
- sig-apps-api-approvers
- sig-auth-policy-approvers
- sig-auth-policy-reviewers
labels:
- sig/auth
- sig/apps

View File

@ -1,51 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
PDBV1beta1Label = "pdb.kubernetes.io/deprecated-v1beta1-empty-selector-match"
)
var (
NonV1beta1MatchAllSelector = &metav1.LabelSelector{}
NonV1beta1MatchNoneSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpExists}},
}
V1beta1MatchNoneSelector = &metav1.LabelSelector{}
V1beta1MatchAllSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{Key: PDBV1beta1Label, Operator: metav1.LabelSelectorOpDoesNotExist}},
}
)
func StripPDBV1beta1Label(selector *metav1.LabelSelector) {
if selector == nil {
return
}
trimmedMatchExpressions := selector.MatchExpressions[:0]
for _, exp := range selector.MatchExpressions {
if exp.Key != PDBV1beta1Label {
trimmedMatchExpressions = append(trimmedMatchExpressions, exp)
}
}
selector.MatchExpressions = trimmedMatchExpressions
}
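// Editor's sketch (not part of the vendored file): StripPDBV1beta1Label, as it
// existed before this commit removes the internal policy package, deletes only the
// deprecated-selector requirement and keeps every other match expression. The "app"
// requirement and the k8s.io/kubernetes/pkg/apis/policy import path are assumptions.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	policy "k8s.io/kubernetes/pkg/apis/policy"
)

func stripExample() *metav1.LabelSelector {
	sel := &metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: policy.PDBV1beta1Label, Operator: metav1.LabelSelectorOpExists},
			{Key: "app", Operator: metav1.LabelSelectorOpIn, Values: []string{"web"}},
		},
	}
	policy.StripPDBV1beta1Label(sel) // only the "app" requirement remains
	return sel
}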

View File

@ -1,58 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "policy"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this gets cleaned up when the types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&PodDisruptionBudget{},
&PodDisruptionBudgetList{},
&PodSecurityPolicy{},
&PodSecurityPolicyList{},
&Eviction{},
)
return nil
}
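// Editor's sketch (not part of the vendored file): how a register.go like the one
// above is typically consumed, installing the group into a runtime.Scheme via
// AddToScheme. The k8s.io/kubernetes/pkg/apis/policy import path is an assumption,
// and this file is being removed from the vendor tree in this change.
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	policy "k8s.io/kubernetes/pkg/apis/policy"
)

func newPolicyScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := policy.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}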

View File

@ -1,529 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package policy
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
type PodDisruptionBudgetSpec struct {
// An eviction is allowed if at least "minAvailable" pods selected by
// "selector" will still be available after the eviction, i.e. even in the
// absence of the evicted pod. So for example you can prevent all voluntary
// evictions by specifying "100%".
// +optional
MinAvailable *intstr.IntOrString
// Label query over pods whose evictions are managed by the disruption
// budget.
// +optional
Selector *metav1.LabelSelector
// An eviction is allowed if at most "maxUnavailable" pods selected by
// "selector" are unavailable after the eviction, i.e. even in absence of
// the evicted pod. For example, one can prevent all voluntary evictions
// by specifying 0. This is a mutually exclusive setting with "minAvailable".
// +optional
MaxUnavailable *intstr.IntOrString
// UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
// should be considered for eviction. Current implementation considers healthy pods,
// as pods that have status.conditions item with type="Ready",status="True".
//
// Valid policies are IfHealthyBudget and AlwaysAllow.
// If no policy is specified, the default behavior will be used,
// which corresponds to the IfHealthyBudget policy.
//
// IfHealthyBudget policy means that running pods (status.phase="Running"),
// but not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
//
// AlwaysAllow policy means that all running pods (status.phase="Running"),
// but not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB is met. This means prospective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
//
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
//
// This field is alpha-level. The eviction API uses this field when
// the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).
// +optional
UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType
}
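// Editor's sketch (not part of the vendored file): a PodDisruptionBudgetSpec that
// keeps at least 80% of the selected pods available; minAvailable and maxUnavailable
// are mutually exclusive, so only one is set. The label values and the
// k8s.io/kubernetes/pkg/apis/policy import path are assumptions.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	policy "k8s.io/kubernetes/pkg/apis/policy"
)

func webPDB() policy.PodDisruptionBudgetSpec {
	minAvailable := intstr.FromString("80%")
	return policy.PodDisruptionBudgetSpec{
		MinAvailable: &minAvailable,
		Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
	}
}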
// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods
// should be considered for eviction.
// +enum
type UnhealthyPodEvictionPolicyType string
const (
// IfHealthyBudget policy means that running pods (status.phase="Running"),
// but not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget"
// AlwaysAllow policy means that all running pods (status.phase="Running"),
// but not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB is met. This means prospective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow"
)
// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {
// Most recent generation observed when updating this PDB status. DisruptionsAllowed and other
// status information is valid only if observedGeneration equals the PDB's object generation.
// +optional
ObservedGeneration int64
// DisruptedPods contains information about pods whose eviction was
// processed by the API server eviction subresource handler but has not
// yet been observed by the PodDisruptionBudget controller.
// A pod will be in this map from the time when the API server processed the
// eviction request to the time when the pod is seen by PDB controller
// as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
// and the value is the time when the API server processed the eviction request. If
// the deletion didn't occur and a pod is still there it will be removed from
// the list automatically by PodDisruptionBudget controller after some time.
// If everything goes smoothly this map should be empty most of the time.
// A large number of entries in the map may indicate problems with pod deletions.
// +optional
DisruptedPods map[string]metav1.Time
// Number of pod disruptions that are currently allowed.
DisruptionsAllowed int32
// current number of healthy pods
CurrentHealthy int32
// minimum desired number of healthy pods
DesiredHealthy int32
// total number of pods counted by this disruption budget
ExpectedPods int32
// Conditions contain conditions for PDB
// +optional
Conditions []metav1.Condition
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
type PodDisruptionBudget struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Specification of the desired behavior of the PodDisruptionBudget.
// +optional
Spec PodDisruptionBudgetSpec
// Most recently observed status of the PodDisruptionBudget.
// +optional
Status PodDisruptionBudgetStatus
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
type PodDisruptionBudgetList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []PodDisruptionBudget
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Eviction evicts a pod from its node subject to certain policies and safety constraints.
// This is a subresource of Pod. A request to cause such an eviction is
// created by POSTing to .../pods/<pod name>/eviction.
type Eviction struct {
metav1.TypeMeta
// ObjectMeta describes the pod that is being evicted.
// +optional
metav1.ObjectMeta
// DeleteOptions may be provided
// +optional
DeleteOptions *metav1.DeleteOptions
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext
// that will be applied to a pod and container.
type PodSecurityPolicy struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// Spec defines the policy enforced.
// +optional
Spec PodSecurityPolicySpec
}
// PodSecurityPolicySpec defines the policy enforced.
type PodSecurityPolicySpec struct {
// Privileged determines if a pod can request to be run as privileged.
// +optional
Privileged bool
// DefaultAddCapabilities is the default set of capabilities that will be added to the container
// unless the pod spec specifically drops the capability. You may not list a capability in both
// DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly
// allowed, and need not be included in the AllowedCapabilities list.
// +optional
DefaultAddCapabilities []api.Capability
// RequiredDropCapabilities are the capabilities that will be dropped from the container. These
// are required to be dropped and cannot be added.
// +optional
RequiredDropCapabilities []api.Capability
// AllowedCapabilities is a list of capabilities that can be requested to add to the container.
// Capabilities in this field may be added at the pod author's discretion.
// You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.
// To allow all capabilities you may use '*'.
// +optional
AllowedCapabilities []api.Capability
// Volumes is an allowlist of volume plugins. Empty indicates that
// no volumes may be used. To allow all volumes you may use '*'.
// +optional
Volumes []FSType
// HostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
// +optional
HostNetwork bool
// HostPorts determines which host port ranges are allowed to be exposed.
// +optional
HostPorts []HostPortRange
// HostPID determines if the policy allows the use of HostPID in the pod spec.
// +optional
HostPID bool
// HostIPC determines if the policy allows the use of HostIPC in the pod spec.
// +optional
HostIPC bool
// SELinux is the strategy that will dictate the allowable labels that may be set.
SELinux SELinuxStrategyOptions
// RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
RunAsUser RunAsUserStrategyOptions
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
// RunAsGroup feature gate to be enabled.
RunAsGroup *RunAsGroupStrategyOptions
// SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
SupplementalGroups SupplementalGroupsStrategyOptions
// FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
FSGroup FSGroupStrategyOptions
// ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file
// system. If the container specifically requests to run with a non-read only root file system
// the PSP should deny the pod.
// If set to false the container may run with a read only root file system if it wishes but it
// will not be forced to.
// +optional
ReadOnlyRootFilesystem bool
// DefaultAllowPrivilegeEscalation controls the default setting for whether a
// process can gain more privileges than its parent process.
// +optional
DefaultAllowPrivilegeEscalation *bool
// AllowPrivilegeEscalation determines if a pod can request to allow
// privilege escalation. If unspecified, defaults to true.
// +optional
AllowPrivilegeEscalation bool
// AllowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.
// +optional
AllowedHostPaths []AllowedHostPath
// AllowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all
// Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes
// is allowed in the "Volumes" field.
// +optional
AllowedFlexVolumes []AllowedFlexVolume
// AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec.
// An empty value indicates that any CSI driver can be used for inline ephemeral volumes.
// +optional
AllowedCSIDrivers []AllowedCSIDriver
// AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
// Kubelet has to allowlist all unsafe sysctls explicitly to avoid rejection.
//
// Examples:
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
// +optional
AllowedUnsafeSysctls []string
// ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
//
// Examples:
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
// +optional
ForbiddenSysctls []string
// AllowedProcMountTypes is an allowlist of ProcMountTypes.
// Empty or nil indicates that only the DefaultProcMountType may be used.
// +optional
AllowedProcMountTypes []api.ProcMountType
// runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod.
// If this field is omitted, the pod's runtimeClassName field is unrestricted.
// Enforcement of this field depends on the RuntimeClass feature gate being enabled.
// +optional
RuntimeClass *RuntimeClassStrategyOptions
}
// AllowedHostPath defines the host volume conditions that will be enabled by a policy
// for pods to use. It requires the path prefix to be defined.
type AllowedHostPath struct {
// PathPrefix is the path prefix that the host volume must match.
// PathPrefix does not support `*`.
// Trailing slashes are trimmed when validating the path prefix with a host path.
//
// Examples:
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
// `/foo` would not allow `/food` or `/etc/foo`
PathPrefix string
// when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.
ReadOnly bool
}
// HostPortRange defines a range of host ports that will be enabled by a policy
// for pods to use. It requires both the start and end to be defined.
type HostPortRange struct {
// Min is the start of the range, inclusive.
Min int32
// Max is the end of the range, inclusive.
Max int32
}
// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowedCapabilities
// field and means that any capabilities are allowed to be requested.
var AllowAllCapabilities api.Capability = "*"
// FSType gives strong typing to different file systems that are used by volumes.
type FSType string
// Exported FSTypes.
const (
AzureFile FSType = "azureFile"
Flocker FSType = "flocker"
FlexVolume FSType = "flexVolume"
HostPath FSType = "hostPath"
EmptyDir FSType = "emptyDir"
GCEPersistentDisk FSType = "gcePersistentDisk"
AWSElasticBlockStore FSType = "awsElasticBlockStore"
GitRepo FSType = "gitRepo"
Secret FSType = "secret"
NFS FSType = "nfs"
ISCSI FSType = "iscsi"
Glusterfs FSType = "glusterfs"
PersistentVolumeClaim FSType = "persistentVolumeClaim"
RBD FSType = "rbd"
Cinder FSType = "cinder"
CephFS FSType = "cephFS"
DownwardAPI FSType = "downwardAPI"
FC FSType = "fc"
ConfigMap FSType = "configMap"
VsphereVolume FSType = "vsphereVolume"
Quobyte FSType = "quobyte"
AzureDisk FSType = "azureDisk"
PhotonPersistentDisk FSType = "photonPersistentDisk"
StorageOS FSType = "storageos"
Projected FSType = "projected"
PortworxVolume FSType = "portworxVolume"
ScaleIO FSType = "scaleIO"
CSI FSType = "csi"
Ephemeral FSType = "ephemeral"
All FSType = "*"
)
// AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
type AllowedFlexVolume struct {
// Driver is the name of the Flexvolume driver.
Driver string
}
// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.
type AllowedCSIDriver struct {
// Name is the registered name of the CSI driver
Name string
}
// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.
type SELinuxStrategyOptions struct {
// Rule is the strategy that will dictate the allowable labels that may be set.
Rule SELinuxStrategy
// SELinuxOptions required to run as; required for MustRunAs
// More info: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#selinux
// +optional
SELinuxOptions *api.SELinuxOptions
}
// SELinuxStrategy denotes strategy types for generating SELinux options for a
// SecurityContext.
type SELinuxStrategy string
const (
// SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied.
SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs"
// SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels.
SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny"
)
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsUserStrategyOptions struct {
// Rule is the strategy that will dictate the allowable RunAsUser values that may be set.
Rule RunAsUserStrategy
// Ranges are the allowed ranges of uids that may be used. If you would like to force a single uid
// then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsGroupStrategyOptions struct {
// Rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
Rule RunAsGroupStrategy
// Ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
// then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// IDRange provides a min/max of an allowed range of IDs.
type IDRange struct {
// Min is the start of the range, inclusive.
Min int64
// Max is the end of the range, inclusive.
Max int64
}
// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a
// SecurityContext.
type RunAsUserStrategy string
const (
// RunAsUserStrategyMustRunAs means that container must run as a particular uid.
RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs"
// RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid
RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot"
// RunAsUserStrategyRunAsAny means that container may make requests for any uid.
RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
)
// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
// SecurityContext.
type RunAsGroupStrategy string
const (
// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
// However, when RunAsGroup are specified, they have to fall in the defined range.
RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
)
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
type FSGroupStrategyOptions struct {
// Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
// +optional
Rule FSGroupStrategyType
// Ranges are the allowed ranges of fs groups. If you would like to force a single
// fs group then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
// SecurityContext
type FSGroupStrategyType string
const (
// FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied.
// However, when FSGroups are specified, they have to fall in the defined range.
FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs"
// FSGroupStrategyMustRunAs means that container must have FSGroup of X applied.
FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
// FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.
FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
)
// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
type SupplementalGroupsStrategyOptions struct {
// Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
// +optional
Rule SupplementalGroupsStrategyType
// Ranges are the allowed ranges of supplemental groups. If you would like to force a single
// supplemental group then supply a single range with the same start and end. Required for MustRunAs.
// +optional
Ranges []IDRange
}
// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
// groups for a SecurityContext.
type SupplementalGroupsStrategyType string
const (
// SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid.
// However, when gids are specified, they have to fall in the defined range.
SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs"
// SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
// SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
)
// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses
// for a pod.
type RuntimeClassStrategyOptions struct {
// allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod.
// A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the
// list. An empty list requires the RuntimeClassName field to be unset.
AllowedRuntimeClassNames []string
// defaultRuntimeClassName is the default RuntimeClassName to set on the pod.
// The default MUST be allowed by the allowedRuntimeClassNames list.
// A value of nil does not mutate the Pod.
// +optional
DefaultRuntimeClassName *string
}
// AllowAllRuntimeClassNames can be used as a value for the
// RuntimeClassStrategyOptions.allowedRuntimeClassNames field and means that any runtimeClassName is
// allowed.
const AllowAllRuntimeClassNames = "*"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
type PodSecurityPolicyList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta
Items []PodSecurityPolicy
}

View File

@ -1,548 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package policy
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
core "k8s.io/kubernetes/pkg/apis/core"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver.
func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver {
if in == nil {
return nil
}
out := new(AllowedCSIDriver)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume.
func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume {
if in == nil {
return nil
}
out := new(AllowedFlexVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath.
func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
if in == nil {
return nil
}
out := new(AllowedHostPath)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Eviction) DeepCopyInto(out *Eviction) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.DeleteOptions != nil {
in, out := &in.DeleteOptions, &out.DeleteOptions
*out = new(v1.DeleteOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction.
func (in *Eviction) DeepCopy() *Eviction {
if in == nil {
return nil
}
out := new(Eviction)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Eviction) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions.
func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
if in == nil {
return nil
}
out := new(FSGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPortRange) DeepCopyInto(out *HostPortRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange.
func (in *HostPortRange) DeepCopy() *HostPortRange {
if in == nil {
return nil
}
out := new(HostPortRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IDRange) DeepCopyInto(out *IDRange) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange.
func (in *IDRange) DeepCopy() *IDRange {
if in == nil {
return nil
}
out := new(IDRange)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget.
func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget {
if in == nil {
return nil
}
out := new(PodDisruptionBudget)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodDisruptionBudget, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList.
func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) {
*out = *in
if in.MinAvailable != nil {
in, out := &in.MinAvailable, &out.MinAvailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
if in.UnhealthyPodEvictionPolicy != nil {
in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy
*out = new(UnhealthyPodEvictionPolicyType)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec.
func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) {
*out = *in
if in.DisruptedPods != nil {
in, out := &in.DisruptedPods, &out.DisruptedPods
*out = make(map[string]v1.Time, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus.
func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus {
if in == nil {
return nil
}
out := new(PodDisruptionBudgetStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy.
func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy {
if in == nil {
return nil
}
out := new(PodSecurityPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodSecurityPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList.
func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList {
if in == nil {
return nil
}
out := new(PodSecurityPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
*out = *in
if in.DefaultAddCapabilities != nil {
in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.RequiredDropCapabilities != nil {
in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.AllowedCapabilities != nil {
in, out := &in.AllowedCapabilities, &out.AllowedCapabilities
*out = make([]core.Capability, len(*in))
copy(*out, *in)
}
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]FSType, len(*in))
copy(*out, *in)
}
if in.HostPorts != nil {
in, out := &in.HostPorts, &out.HostPorts
*out = make([]HostPortRange, len(*in))
copy(*out, *in)
}
in.SELinux.DeepCopyInto(&out.SELinux)
in.RunAsUser.DeepCopyInto(&out.RunAsUser)
if in.RunAsGroup != nil {
in, out := &in.RunAsGroup, &out.RunAsGroup
*out = new(RunAsGroupStrategyOptions)
(*in).DeepCopyInto(*out)
}
in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
in.FSGroup.DeepCopyInto(&out.FSGroup)
if in.DefaultAllowPrivilegeEscalation != nil {
in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation
*out = new(bool)
**out = **in
}
if in.AllowedHostPaths != nil {
in, out := &in.AllowedHostPaths, &out.AllowedHostPaths
*out = make([]AllowedHostPath, len(*in))
copy(*out, *in)
}
if in.AllowedFlexVolumes != nil {
in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes
*out = make([]AllowedFlexVolume, len(*in))
copy(*out, *in)
}
if in.AllowedCSIDrivers != nil {
in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers
*out = make([]AllowedCSIDriver, len(*in))
copy(*out, *in)
}
if in.AllowedUnsafeSysctls != nil {
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ForbiddenSysctls != nil {
in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowedProcMountTypes != nil {
in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes
*out = make([]core.ProcMountType, len(*in))
copy(*out, *in)
}
if in.RuntimeClass != nil {
in, out := &in.RuntimeClass, &out.RuntimeClass
*out = new(RuntimeClassStrategyOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec.
func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
if in == nil {
return nil
}
out := new(PodSecurityPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsGroupStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions.
func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions {
if in == nil {
return nil
}
out := new(RunAsUserStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) {
*out = *in
if in.AllowedRuntimeClassNames != nil {
in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DefaultRuntimeClassName != nil {
in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions.
func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions {
if in == nil {
return nil
}
out := new(RuntimeClassStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) {
*out = *in
if in.SELinuxOptions != nil {
in, out := &in.SELinuxOptions, &out.SELinuxOptions
*out = new(core.SELinuxOptions)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions.
func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions {
if in == nil {
return nil
}
out := new(SELinuxStrategyOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) {
*out = *in
if in.Ranges != nil {
in, out := &in.Ranges, &out.Ranges
*out = make([]IDRange, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions.
func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions {
if in == nil {
return nil
}
out := new(SupplementalGroupsStrategyOptions)
in.DeepCopyInto(out)
return out
}

View File

@ -64,17 +64,3 @@ func PodCompleted(event watch.Event) (bool, error) {
}
return false, nil
}
// ServiceAccountHasSecrets returns true if the service account has at least one secret,
// false if it does not, or an error.
func ServiceAccountHasSecrets(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, errors.NewNotFound(schema.GroupResource{Resource: "serviceaccounts"}, "")
}
switch t := event.Object.(type) {
case *v1.ServiceAccount:
return len(t.Secrets) > 0, nil
}
return false, nil
}
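
A minimal usage sketch (not part of the vendored diff): condition functions with this shape, such as PodCompleted above, are passed to client-go's watch helpers. The pod name, namespace, timeout, and package name below are illustrative assumptions.
package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodCompleted blocks until the condition (e.g. PodCompleted) returns
// true, returns an error, or the context expires.
func waitForPodCompleted(client kubernetes.Interface, cond watchtools.ConditionFunc) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	sel := fields.OneTermEqualSelector("metadata.name", "my-pod").String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = sel
			return client.CoreV1().Pods("default").List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = sel
			return client.CoreV1().Pods("default").Watch(ctx, options)
		},
	}
	// The condition is evaluated against every event delivered by the watch.
	_, err := watchtools.UntilWithSync(ctx, lw, &v1.Pod{}, nil, cond)
	return err
}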

View File

@ -16,6 +16,10 @@ limitations under the License.
package ports
import (
cpoptions "k8s.io/cloud-provider/options"
)
// This file lists the default ports used across the cluster.
// It also serves as important documentation, so do not remove these entries lightly.
const (
@ -43,4 +47,8 @@ const (
// CloudControllerManagerPort is the default port for the cloud controller manager server.
// This value may be overridden by a flag at startup.
CloudControllerManagerPort = 10258
// CloudControllerManagerWebhookPort is the default port for the cloud
// controller manager webhook server. May be overridden by a flag at
// startup.
CloudControllerManagerWebhookPort = cpoptions.CloudControllerManagerWebhookPort
)

View File

@ -143,9 +143,8 @@ type PodControllerRefManager struct {
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
//
// PodControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
// PodControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager(
podControl PodControlInterface,
controller metav1.Object,
@ -284,9 +283,8 @@ type ReplicaSetControllerRefManager struct {
// If CanAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once CanAdopt() is called, it will not be called again by the same
//
// ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
// ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager(
rsControl RSControlInterface,
controller metav1.Object,
@ -423,9 +421,8 @@ type ControllerRevisionControllerRefManager struct {
// If canAdopt() returns a non-nil error, all adoptions will fail.
//
// NOTE: Once canAdopt() is called, it will not be called again by the same
//
// ControllerRevisionControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
// ControllerRevisionControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
func NewControllerRevisionControllerRefManager(
crControl ControllerRevisionControlInterface,
controller metav1.Object,

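A hedged sketch (not part of the vendored diff) of the behavior the NOTE comments above describe: the manager memoizes the result of its first CanAdopt() call, so later adoptions by the same instance reuse it. The type and field names are illustrative, not the actual controller code.
package example

import (
	"context"
	"sync"
)

type refManager struct {
	canAdoptFunc func(ctx context.Context) error
	canAdoptOnce sync.Once
	canAdoptErr  error
}

// canAdopt runs canAdoptFunc at most once per refManager instance; a new
// instance is needed if the check should be performed again.
func (m *refManager) canAdopt(ctx context.Context) error {
	m.canAdoptOnce.Do(func() {
		if m.canAdoptFunc != nil {
			m.canAdoptErr = m.canAdoptFunc(ctx)
		}
	})
	return m.canAdoptErr
}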
View File

@ -1039,12 +1039,12 @@ func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
option := metav1.GetOptions{}
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
option.ResourceVersion = "0"
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
if err != nil {
return err
}
@ -1096,12 +1096,12 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str
var oldNode *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
option := metav1.GetOptions{}
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
option.ResourceVersion = "0"
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
if err != nil {
return err
}
@ -1178,12 +1178,12 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
var node *v1.Node
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
option := metav1.GetOptions{}
if firstTry {
node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
option.ResourceVersion = "0"
firstTry = false
} else {
node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
}
node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, option)
if err != nil {
return err
}
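
The hunks above fold the two Get calls into one by toggling ResourceVersion on the first attempt. A minimal hedged sketch of that cache-first pattern (function and variable names are illustrative assumptions):
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func getNodeCacheFirst(ctx context.Context, c kubernetes.Interface, name string) (*v1.Node, error) {
	// First attempt: ResourceVersion "0" lets the apiserver answer from its
	// watch cache, which is cheaper than a quorum read from etcd.
	node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{ResourceVersion: "0"})
	if err == nil {
		return node, nil
	}
	// Fallback: an empty GetOptions forces a fresh read to be sure the data is current.
	return c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
}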

View File

@ -189,7 +189,7 @@ func MaxRevision(allRSs []*apps.ReplicaSet) int64 {
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip replica sets whose revision information could not be parsed
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
klog.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
} else if v > max {
max = v
}
@ -203,7 +203,7 @@ func LastRevision(allRSs []*apps.ReplicaSet) int64 {
for _, rs := range allRSs {
if v, err := Revision(rs); err != nil {
// Skip replica sets whose revision information could not be parsed
klog.V(4).Infof("Error: %v. Couldn't parse revision for replica set %#v, deployment controller will skip it when reconciling revisions.", err, rs)
klog.V(4).Info("Couldn't parse revision for replica set, deployment controller will skip it when reconciling revisions", "replicaSet", klog.KObj(rs), "err", err)
} else if v >= max {
secMax = max
max = v
@ -229,7 +229,8 @@ func Revision(obj runtime.Object) (int64, error) {
// SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
// copying required deployment annotations to it; it returns true if replica set's annotation is changed.
func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
func SetNewReplicaSetAnnotations(ctx context.Context, deployment *apps.Deployment, newRS *apps.ReplicaSet, newRevision string, exists bool, revHistoryLimitInChars int) bool {
logger := klog.FromContext(ctx)
// First, copy deployment's annotations (except for apply and revision annotations)
annotationChanged := copyDeploymentAnnotationsToReplicaSet(deployment, newRS)
// Then, update replica set's revision annotation
@ -244,7 +245,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64)
if err != nil {
if oldRevision != "" {
klog.Warningf("Updating replica set revision OldRevision not int %s", err)
logger.Info("Updating replica set revision OldRevision not int", "err", err)
return false
}
// If the RS annotation is empty then initialise it to 0
@ -252,13 +253,13 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
if err != nil {
klog.Warningf("Updating replica set revision NewRevision not int %s", err)
logger.Info("Updating replica set revision NewRevision not int", "err", err)
return false
}
if oldRevisionInt < newRevisionInt {
newRS.Annotations[RevisionAnnotation] = newRevision
annotationChanged = true
klog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
logger.V(4).Info("Updating replica set revision", "replicaSet", klog.KObj(newRS), "newRevision", newRevision)
}
// If a revision annotation already existed and this replica set was updated with a new revision
// then that means we are rolling back to this replica set. We need to preserve the old revisions
@ -280,7 +281,7 @@ func SetNewReplicaSetAnnotations(deployment *apps.Deployment, newRS *apps.Replic
oldRevisions = append(oldRevisions[start:], oldRevision)
newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
} else {
klog.Warningf("Not appending revision due to length limit of %v reached", revHistoryLimitInChars)
logger.Info("Not appending revision due to revision history length limit reached", "revisionHistoryLimit", revHistoryLimitInChars)
}
}
}
@ -303,7 +304,7 @@ var annotationsToSkip = map[string]bool{
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
// TODO: How to decide which annotations should / should not be copied?
//
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool {
return annotationsToSkip[key]
}
@ -376,22 +377,22 @@ func FindActiveOrLatest(newRS *apps.ReplicaSet, oldRSs []*apps.ReplicaSet) *apps
}
// GetDesiredReplicasAnnotation returns the number of desired replicas
func GetDesiredReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(rs, DesiredReplicasAnnotation)
func GetDesiredReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(logger, rs, DesiredReplicasAnnotation)
}
func getMaxReplicasAnnotation(rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(rs, MaxReplicasAnnotation)
func getMaxReplicasAnnotation(logger klog.Logger, rs *apps.ReplicaSet) (int32, bool) {
return getIntFromAnnotation(logger, rs, MaxReplicasAnnotation)
}
func getIntFromAnnotation(rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
func getIntFromAnnotation(logger klog.Logger, rs *apps.ReplicaSet, annotationKey string) (int32, bool) {
annotationValue, ok := rs.Annotations[annotationKey]
if !ok {
return int32(0), false
}
intValue, err := strconv.Atoi(annotationValue)
if err != nil {
klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the replica set %q", annotationValue, annotationKey, rs.Name)
logger.V(2).Info("Could not convert the value with annotation key for the replica set", "annotationValue", annotationValue, "annotationKey", annotationKey, "replicaSet", klog.KObj(rs))
return int32(0), false
}
return int32(intValue), true
@ -466,12 +467,12 @@ func MaxSurge(deployment apps.Deployment) int32 {
// GetProportion will estimate the proportion for the provided replica set using 1. the current size
// of the parent deployment, 2. the replica count that needs be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
func GetProportion(logger klog.Logger, rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0)
}
rsFraction := getReplicaSetFraction(*rs, d)
rsFraction := getReplicaSetFraction(logger, *rs, d)
allowed := deploymentReplicasToAdd - deploymentReplicasAdded
if deploymentReplicasToAdd > 0 {
@ -488,14 +489,14 @@ func GetProportion(rs *apps.ReplicaSet, d apps.Deployment, deploymentReplicasToA
// getReplicaSetFraction estimates the fraction of replicas a replica set can have in
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(rs apps.ReplicaSet, d apps.Deployment) int32 {
func getReplicaSetFraction(logger klog.Logger, rs apps.ReplicaSet, d apps.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if *(d.Spec.Replicas) == int32(0) {
return -*(rs.Spec.Replicas)
}
deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
annotatedReplicas, ok := getMaxReplicasAnnotation(&rs)
annotatedReplicas, ok := getMaxReplicasAnnotation(logger, &rs)
if !ok {
// If we cannot find the annotation then fallback to the current deployment size. Note that this
// will not be an accurate proportion estimation in case other replica sets have different values
@ -734,7 +735,7 @@ var nowFn = func() time.Time { return time.Now() }
// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
// exists.
func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
func DeploymentTimedOut(ctx context.Context, deployment *apps.Deployment, newStatus *apps.DeploymentStatus) bool {
if !HasProgressDeadline(deployment) {
return false
}
@ -763,7 +764,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
if condition.Reason == TimedOutReason {
return true
}
logger := klog.FromContext(ctx)
// Look at the difference in seconds between now and the last time we reported any
// progress or tried to create a replica set, or resumed a paused deployment and
// compare against progressDeadlineSeconds.
@ -772,7 +773,7 @@ func DeploymentTimedOut(deployment *apps.Deployment, newStatus *apps.DeploymentS
delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
timedOut := from.Add(delta).Before(now)
klog.V(4).Infof("Deployment %q timed out (%t) [last progress check: %v - now: %v]", deployment.Name, timedOut, from, now)
logger.V(4).Info("Deployment timed out from last progress check", "deployment", klog.KObj(deployment), "timeout", timedOut, "from", from, "now", now)
return timedOut
}
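
The hunks above migrate deployment util from format-string logging (klog.Infof/Warningf) to contextual, structured logging. A short hedged sketch of the pattern (the function name is an illustrative assumption; klog.FromContext, klog.KObj, and logger.V(...).Info are the real klog/v2 APIs used in the diff):
package example

import (
	"context"

	apps "k8s.io/api/apps/v1"
	"k8s.io/klog/v2"
)

func logRevision(ctx context.Context, rs *apps.ReplicaSet, newRevision string) {
	// The logger travels in the context instead of using the global klog calls.
	logger := klog.FromContext(ctx)
	// klog.KObj renders the object as "namespace/name"; values are passed as
	// key/value pairs rather than interpolated into a format string.
	logger.V(4).Info("Updating replica set revision", "replicaSet", klog.KObj(rs), "newRevision", newRevision)
}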

View File

@ -52,6 +52,7 @@ const (
// owner: @nabokihms
// alpha: v1.26
// beta: v1.27
//
// Enables API to get self subject attributes after authentication.
APISelfSubjectReview featuregate.Feature = "APISelfSubjectReview"
@ -60,6 +61,18 @@ const (
// beta: v1.4
AppArmor featuregate.Feature = "AppArmor"
// owner: @danwinship
// alpha: v1.27
//
// Enables dual-stack --node-ip in kubelet with external cloud providers
CloudDualStackNodeIPs featuregate.Feature = "CloudDualStackNodeIPs"
// owner: @ahmedtd
// alpha: v1.26
//
// Enable ClusterTrustBundle objects and Kubelet integration.
ClusterTrustBundle featuregate.Feature = "ClusterTrustBundle"
// owner: @szuecs
// alpha: v1.12
//
@ -108,37 +121,6 @@ const (
// Allow the usage of options to fine-tune the cpumanager policies.
CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions"
// owner: @pohly
// alpha: v1.14
// beta: v1.16
// GA: v1.25
//
// Enables CSI Inline volumes support for pods
CSIInlineVolume featuregate.Feature = "CSIInlineVolume"
// owner: @davidz627
// alpha: v1.14
// beta: v1.17
//
// Enables the in-tree storage to CSI Plugin migration feature.
CSIMigration featuregate.Feature = "CSIMigration"
// owner: @leakingtapan
// alpha: v1.14
// beta: v1.17
// GA: v1.25
//
// Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature.
CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS"
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.19
// GA: v1.24
//
// Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature.
CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk"
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.21
@ -176,6 +158,7 @@ const (
// owner: @humblec, @zhucan
// kep: https://kep.k8s.io/3171
// alpha: v1.25
// beta: v1.27
//
// Enables SecretRef field in CSI NodeExpandVolume request.
CSINodeExpandSecret featuregate.Feature = "CSINodeExpandSecret"
@ -194,6 +177,12 @@ const (
// Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it.
CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth"
// owner: @nckturner
// kep: http://kep.k8s.io/2699
// alpha: v1.27
// Enable webhooks in cloud controller manager
CloudControllerManagerWebhook featuregate.Feature = "CloudControllerManagerWebhook"
// owner: @adrianreber
// kep: https://kep.k8s.io/2008
// alpha: v1.25
@ -207,29 +196,15 @@ const (
// Normalize HttpGet URL and Header passing for lifecycle handlers with probers.
ConsistentHTTPGetHandlers featuregate.Feature = "ConsistentHTTPGetHandlers"
// owner: @jiahuif
// alpha: v1.21
// beta: v1.22
// GA: v1.24
//
// Enables Leader Migration for kube-controller-manager and cloud-controller-manager
ControllerManagerLeaderMigration featuregate.Feature = "ControllerManagerLeaderMigration"
// owner: @deejross, @soltysh
// kep: https://kep.k8s.io/3140
// alpha: v1.24
// beta: v1.25
// GA: 1.27
//
// Enables support for time zones in CronJobs.
CronJobTimeZone featuregate.Feature = "CronJobTimeZone"
// owner: @smarterclayton
// alpha: v1.21
// beta: v1.22
// GA: v1.25
// DaemonSets allow workloads to maintain availability during update per node
DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge"
// owner: @gnufied, @verult, @bertinatto
// alpha: v1.22
// beta: v1.23
@ -270,6 +245,7 @@ const (
// owner: @derekwaynecarr
// alpha: v1.20
// beta: v1.21 (off by default until 1.22)
// ga: v1.27
//
// Enables usage of hugepages-<size> in downward API.
DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
@ -291,17 +267,10 @@ const (
// Enable Terminating condition in Endpoint Slices.
EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"
// owner: @verb
// alpha: v1.16
// beta: v1.23
// GA: v1.25
//
// Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
EphemeralContainers featuregate.Feature = "EphemeralContainers"
// owner: @harche
// kep: http://kep.k8s.io/3386
// alpha: v1.25
// beta: v1.27
//
// Allows using event-driven PLEG (pod lifecycle event generator) through kubelet
// which avoids frequent relisting of containers which helps optimize performance.
@ -315,25 +284,6 @@ const (
// Lock to default and remove after v1.22 based on user feedback that should be reflected in KEP #1972 update
ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout"
// owner: @gnufied
// alpha: v1.14
// beta: v1.16
// GA: 1.24
// Ability to expand CSI volumes
ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes"
// owner: @mlmhl @gnufied
// beta: v1.15
// GA: 1.24
// Ability to expand persistent volumes' file system without unmounting volumes.
ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes"
// owner: @gnufied
// beta: v1.11
// GA: 1.24
// Ability to Expand persistent volumes
ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes"
// owner: @gjkim42
// kep: https://kep.k8s.io/2595
// alpha: v1.22
@ -354,6 +304,7 @@ const (
// kep: https://kep.k8s.io/2727
// alpha: v1.23
// beta: v1.24
// stable: v1.27
//
// Enables GRPC probe method for {Liveness,Readiness,Startup}Probe.
GRPCContainerProbe featuregate.Feature = "GRPCContainerProbe"
@ -370,8 +321,10 @@ const (
// Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown.
GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority"
// owner: @arjunrn @mwielgus @josephburnett
// owner: @arjunrn @mwielgus @josephburnett @sanposhiho
// kep: https://kep.k8s.io/1610
// alpha: v1.20
// beta: v1.27
//
// Add support for the HPA to scale based on metrics from individual containers
// in target pods
@ -391,14 +344,6 @@ const (
// deletion ordering.
HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
// owner: @ravig
// alpha: v1.23
// beta: v1.24
// GA: v1.25
// IdentifyPodOS allows user to specify OS on which they'd like the Pod run. The user should still set the nodeSelector
// with appropriate `kubernetes.io/os` label for scheduler to identify appropriate node for the pod to run.
IdentifyPodOS featuregate.Feature = "IdentifyPodOS"
// owner: @leakingtapan
// alpha: v1.21
//
@ -450,6 +395,7 @@ const (
// owner: @danwinship
// kep: https://kep.k8s.io/3178
// alpha: v1.25
// beta: v1.27
//
// Causes kubelet to no longer create legacy IPTables rules
IPTablesOwnershipCleanup featuregate.Feature = "IPTablesOwnershipCleanup"
@ -465,6 +411,7 @@ const (
// owner: @ahg
// beta: v1.23
// stable: v1.27
//
// Allow updating node scheduling directives in the pod template of jobs. Specifically,
// node affinity, selector and tolerations. This is allowed only for suspended jobs
@ -512,6 +459,18 @@ const (
// Enables the kubelet's pod resources grpc endpoint
KubeletPodResources featuregate.Feature = "KubeletPodResources"
// owner: @moshe010
// alpha: v1.27
//
// Enable POD resources API to return resources allocated by Dynamic Resource Allocation
KubeletPodResourcesDynamicResources featuregate.Feature = "KubeletPodResourcesDynamicResources"
// owner: @moshe010
// alpha: v1.27
//
// Enable POD resources API with Get method
KubeletPodResourcesGet featuregate.Feature = "KubeletPodResourcesGet"
// owner: @fromanirh
// alpha: v1.21
// beta: v1.23
@ -521,6 +480,7 @@ const (
// owner: @sallyom
// kep: https://kep.k8s.io/2832
// alpha: v1.25
// beta: v1.27
//
// Add support for distributed tracing in the kubelet
KubeletTracing featuregate.Feature = "KubeletTracing"
@ -528,24 +488,19 @@ const (
// owner: @zshihang
// kep: https://kep.k8s.io/2800
// beta: v1.24
// ga: v1.26
//
// Stop auto-generation of secret-based service account tokens.
LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration"
// owner: @zshihang
// kep: http://kep.k8s.io/2800
// alpha: v1.25
// alpha: v1.26
// beta: v1.27
//
// Enables tracking of secret-based service account tokens usage.
LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking"
// owner: @jinxu
// beta: v1.10
// stable: v1.25
//
// Support local ephemeral storage types for local storage capacity isolation feature.
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
// owner: @RobertKrawitz
// alpha: v1.15
//
@ -563,6 +518,7 @@ const (
// owner: @denkensk
// kep: https://kep.k8s.io/3243
// alpha: v1.25
// beta: v1.27
//
// Enable MatchLabelKeys in PodTopologySpread.
MatchLabelKeysInPodTopologySpread featuregate.Feature = "MatchLabelKeysInPodTopologySpread"
@ -597,6 +553,7 @@ const (
// owner: @danwinship
// kep: http://kep.k8s.io/3453
// alpha: v1.26
// beta: v1.27
//
// Enables new performance-improving code in kube-proxy iptables mode
MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore"
@ -617,14 +574,12 @@ const (
// Enables the MultiCIDR Range allocator.
MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator"
// owner: @rikatz
// kep: https://kep.k8s.io/2079
// alpha: v1.21
// beta: v1.22
// ga: v1.25
// owner: @aojea
// kep: https://kep.k8s.io/1880
// alpha: v1.27
//
// Enables the endPort field in NetworkPolicy to enable a Port Range behavior in Network Policies.
NetworkPolicyEndPort featuregate.Feature = "NetworkPolicyEndPort"
// Enables the dynamic configuration of Service IP ranges
MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator"
// owner: @rikatz
// kep: https://kep.k8s.io/2943
@ -633,6 +588,20 @@ const (
// Enables NetworkPolicy status subresource
NetworkPolicyStatus featuregate.Feature = "NetworkPolicyStatus"
// owner: @jsafrane
// kep: https://kep.k8s.io/3756
// alpha: v1.25 (as part of SELinuxMountReadWriteOncePod)
// beta: v1.27
// Robust VolumeManager reconstruction after kubelet restart.
NewVolumeManagerReconstruction featuregate.Feature = "NewVolumeManagerReconstruction"
// owner: @aravindhp @LorbusChris
// kep: http://kep.k8s.io/2271
// alpha: v1.27
//
// Enables querying logs of node services using the /logs endpoint
NodeLogQuery featuregate.Feature = "NodeLogQuery"
// owner: @xing-yang @sonasingh46
// kep: https://kep.k8s.io/2268
// alpha: v1.24
@ -650,6 +619,7 @@ const (
// owner: @mortent, @atiratree, @ravig
// kep: http://kep.k8s.io/3018
// alpha: v1.26
// beta: v1.27
//
// Enables PDBUnhealthyPodEvictionPolicy for PodDisruptionBudgets
PDBUnhealthyPodEvictionPolicy featuregate.Feature = "PDBUnhealthyPodEvictionPolicy"
@ -688,6 +658,7 @@ const (
// owner: @Huang-Wei
// kep: https://kep.k8s.io/3521
// alpha: v1.26
// beta: v1.27
//
// Enable users to specify when a Pod is ready for scheduling.
PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness"
@ -729,7 +700,9 @@ const (
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @chrishenzie
// kep: https://kep.k8s.io/2485
// alpha: v1.22
// beta: v1.27
//
// Enables usage of the ReadWriteOncePod PersistentVolume access mode.
ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod"
@ -757,14 +730,32 @@ const (
// certificate as expiration approaches.
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"
// owner: @danielvegamyhre
// kep: https://kep.k8s.io/2413
// beta: v1.27
//
// Allows mutating spec.completions for Indexed job when done in tandem with
// spec.parallelism. Specifically, spec.completions is mutable iff spec.completions
// equals to spec.parallelism before and after the update.
ElasticIndexedJob featuregate.Feature = "ElasticIndexedJob"
// owner: @saschagrunert
// kep: https://kep.k8s.io/2413
// alpha: v1.22
// beta: v1.25
// ga: v1.27
//
// Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads.
SeccompDefault featuregate.Feature = "SeccompDefault"
// owner: @mtardy
// alpha: v1.0
//
// Putting this admission plugin behind a feature gate is part of the
// deprecation process. For details about the removal see:
// https://github.com/kubernetes/kubernetes/issues/111516
SecurityContextDeny featuregate.Feature = "SecurityContextDeny"
// owner: @maplain @andrewsykim
// kep: https://kep.k8s.io/2086
// alpha: v1.21
@ -783,6 +774,13 @@ const (
// Subdivide the ClusterIP range for dynamic and static IP allocation.
ServiceIPStaticSubrange featuregate.Feature = "ServiceIPStaticSubrange"
// owner: @xuzhenglun
// kep: http://kep.k8s.io/3682
// alpha: v1.27
//
// Subdivide the NodePort range for dynamic and static port allocation.
ServiceNodePortStaticSubrange featuregate.Feature = "ServiceNodePortStaticSubrange"
// owner: @derekwaynecarr
// alpha: v1.20
// beta: v1.22
@ -790,22 +788,24 @@ const (
// Enables kubelet support to size memory backed volumes
SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes"
// owner: @alexanderConstantinescu
// kep: http://kep.k8s.io/3458
// beta: v1.27
//
// Reduces the number of load balancer re-configurations performed by the
// service controller (KCCM) in response to node state changes.
StableLoadBalancerNodeSet featuregate.Feature = "StableLoadBalancerNodeSet"
// owner: @mattcary
// alpha: v1.22
// beta: v1.27
//
// Enables policies controlling deletion of PVCs created by a StatefulSet.
StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC"
// owner: @ravig
// kep: https://kep.k8s.io/2607
// alpha: v1.22
// beta: v1.23
// GA: v1.25
// StatefulSetMinReadySeconds allows minReadySeconds to be respected by StatefulSet controller
StatefulSetMinReadySeconds featuregate.Feature = "StatefulSetMinReadySeconds"
// owner: @psch
// alpha: v1.26
// beta: v1.27
//
// Enables a StatefulSet to start from an arbitrary non zero ordinal
StatefulSetStartOrdinal featuregate.Feature = "StatefulSetStartOrdinal"
@ -818,9 +818,10 @@ const (
// Enables topology aware hints for EndpointSlices
TopologyAwareHints featuregate.Feature = "TopologyAwareHints"
// owner: @lmdaly
// owner: @lmdaly, @swatisehgal (for GA graduation)
// alpha: v1.16
// beta: v1.18
// GA: v1.27
//
// Enable resource managers to make NUMA aligned decisions
TopologyManager featuregate.Feature = "TopologyManager"
@ -904,10 +905,18 @@ const (
// owner: @jsafrane
// kep: https://kep.k8s.io/1710
// alpha: v1.25
// beta: v1.27
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
// Initial implementation focused on ReadWriteOncePod volumes.
SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
// owner: @vinaykul
// kep: http://kep.k8s.io/1287
// alpha: v1.27
//
// Enables In-Place Pod Vertical Scaling
InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
)
func init() {
@ -925,10 +934,14 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24
APISelfSubjectReview: {Default: false, PreRelease: featuregate.Alpha},
APISelfSubjectReview: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.27
AppArmor: {Default: true, PreRelease: featuregate.Beta},
CloudDualStackNodeIPs: {Default: false, PreRelease: featuregate.Alpha},
ClusterTrustBundle: {Default: false, PreRelease: featuregate.Alpha},
CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
CPUManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26
@ -939,15 +952,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta},
CSIInlineVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
CSIMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
CSIMigrationAWS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA}, // remove in 1.28
CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
CSIMigrationGCE: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
@ -955,23 +960,21 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CSIMigrationRBD: {Default: false, PreRelease: featuregate.Alpha}, // Off by default (requires RBD CSI driver)
CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
CSINodeExpandSecret: {Default: false, PreRelease: featuregate.Alpha},
CSINodeExpandSecret: {Default: true, PreRelease: featuregate.Beta},
CSIStorageCapacity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},
CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha},
ContainerCheckpoint: {Default: false, PreRelease: featuregate.Alpha},
ConsistentHTTPGetHandlers: {Default: true, PreRelease: featuregate.GA},
ControllerManagerLeaderMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
CronJobTimeZone: {Default: true, PreRelease: featuregate.Beta},
DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
CronJobTimeZone: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
@ -983,40 +986,30 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
DisableKubeletCloudCredentialProviders: {Default: false, PreRelease: featuregate.Alpha},
DownwardAPIHugePages: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22
DownwardAPIHugePages: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.29
EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.28
DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha},
EphemeralContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
EventedPLEG: {Default: false, PreRelease: featuregate.Alpha},
EventedPLEG: {Default: false, PreRelease: featuregate.Beta}, // off by default, requires CRI Runtime support
ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
ExpandCSIVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
ExpandedDNSConfig: {Default: true, PreRelease: featuregate.Beta},
ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta},
GRPCContainerProbe: {Default: true, PreRelease: featuregate.Beta},
GRPCContainerProbe: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, //remove in 1.29
GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta},
GracefulNodeShutdownBasedOnPodPriority: {Default: true, PreRelease: featuregate.Beta},
HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
HPAContainerMetrics: {Default: true, PreRelease: featuregate.Beta},
HonorPVReclaimPolicy: {Default: false, PreRelease: featuregate.Alpha},
IdentifyPodOS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
InTreePluginAWSUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginAzureDiskUnregister: {Default: false, PreRelease: featuregate.Alpha},
@ -1033,11 +1026,11 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
InTreePluginvSphereUnregister: {Default: false, PreRelease: featuregate.Alpha},
IPTablesOwnershipCleanup: {Default: false, PreRelease: featuregate.Alpha},
IPTablesOwnershipCleanup: {Default: true, PreRelease: featuregate.Beta},
JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.Beta},
JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
JobReadyPods: {Default: true, PreRelease: featuregate.Beta},
@ -1049,21 +1042,23 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
KubeletPodResourcesDynamicResources: {Default: false, PreRelease: featuregate.Alpha},
KubeletPodResourcesGet: {Default: false, PreRelease: featuregate.Alpha},
KubeletPodResourcesGetAllocatable: {Default: true, PreRelease: featuregate.Beta},
KubeletTracing: {Default: false, PreRelease: featuregate.Alpha},
KubeletTracing: {Default: true, PreRelease: featuregate.Beta},
LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA},
LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
LegacyServiceAccountTokenTracking: {Default: false, PreRelease: featuregate.Alpha},
LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
LegacyServiceAccountTokenTracking: {Default: true, PreRelease: featuregate.Beta},
LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta},
MatchLabelKeysInPodTopologySpread: {Default: false, PreRelease: featuregate.Alpha},
MatchLabelKeysInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha},
@ -1071,23 +1066,27 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
MemoryQoS: {Default: false, PreRelease: featuregate.Alpha},
MinDomainsInPodTopologySpread: {Default: false, PreRelease: featuregate.Beta},
MinDomainsInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
MinimizeIPTablesRestore: {Default: false, PreRelease: featuregate.Alpha},
MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.Beta},
MixedProtocolLBService: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},
NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha},
NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},
NewVolumeManagerReconstruction: {Default: false, PreRelease: featuregate.Beta}, // disabled for https://github.com/kubernetes/kubernetes/issues/117745
NodeLogQuery: {Default: false, PreRelease: featuregate.Alpha},
NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.Beta},
NodeSwap: {Default: false, PreRelease: featuregate.Alpha},
PDBUnhealthyPodEvictionPolicy: {Default: false, PreRelease: featuregate.Alpha},
PDBUnhealthyPodEvictionPolicy: {Default: true, PreRelease: featuregate.Beta},
PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha},
@ -1097,7 +1096,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
PodHasNetworkCondition: {Default: false, PreRelease: featuregate.Alpha},
PodSchedulingReadiness: {Default: false, PreRelease: featuregate.Alpha},
PodSchedulingReadiness: {Default: true, PreRelease: featuregate.Beta},
PodSecurity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
@ -1109,7 +1108,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
ReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
ReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta},
RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},
@ -1117,23 +1116,29 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
SeccompDefault: {Default: true, PreRelease: featuregate.Beta},
ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta},
SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
SecurityContextDeny: {Default: false, PreRelease: featuregate.Alpha},
ServiceIPStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
ServiceInternalTrafficPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
ServiceNodePortStaticSubrange: {Default: false, PreRelease: featuregate.Alpha},
SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta},
StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha},
StableLoadBalancerNodeSet: {Default: true, PreRelease: featuregate.Beta},
StatefulSetMinReadySeconds: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
StatefulSetAutoDeletePVC: {Default: true, PreRelease: featuregate.Beta},
StatefulSetStartOrdinal: {Default: false, PreRelease: featuregate.Alpha},
StatefulSetStartOrdinal: {Default: true, PreRelease: featuregate.Beta},
TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta},
TopologyManager: {Default: true, PreRelease: featuregate.Beta},
TopologyManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.27; remove in 1.29
TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},
@ -1155,12 +1160,16 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
NodeInclusionPolicyInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
SELinuxMountReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
SELinuxMountReadWriteOncePod: {Default: false, PreRelease: featuregate.Beta}, // disabled for https://github.com/kubernetes/kubernetes/issues/117745
InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha},
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
genericfeatures.AggregatedDiscoveryEndpoint: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.AdmissionWebhookMatchConditions: {Default: false, PreRelease: featuregate.Alpha},
genericfeatures.AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},
@ -1168,7 +1177,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},
genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
genericfeatures.ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha},
@ -1178,11 +1187,11 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.Beta},
genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
// features that enable backwards compatibility but are scheduled to be removed
// ...
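Each constant declared above is paired with a default in defaultKubernetesFeatureGates, and components consult the merged result at runtime. A minimal hedged sketch of such a check (the wrapper function is an illustrative assumption; the gate name is one declared in this file):
package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func podSchedulingReadinessEnabled() bool {
	// DefaultFeatureGate holds these defaults merged with any --feature-gates overrides.
	return utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness)
}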

View File

@ -18,26 +18,36 @@ package fieldpath
import (
"fmt"
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
)
// FormatMap formats map[string]string to a string.
func FormatMap(m map[string]string) (fmtStr string) {
// output with keys in sorted order to provide stable output
keys := sets.NewString()
for key := range m {
keys.Insert(key)
keys := make([]string, 0, len(m))
var grow int
for k, v := range m {
keys = append(keys, k)
// add 4 bytes per entry: one for '=', one for '\n', and two for the quotes around the value
grow += len(k) + len(v) + 4
}
for _, key := range keys.List() {
fmtStr += fmt.Sprintf("%v=%q\n", key, m[key])
sort.Strings(keys)
// allocate space to avoid expansion
dst := make([]byte, 0, grow)
for _, key := range keys {
if len(dst) > 0 {
dst = append(dst, '\n')
}
dst = append(dst, key...)
dst = append(dst, '=')
dst = strconv.AppendQuote(dst, m[key])
}
fmtStr = strings.TrimSuffix(fmtStr, "\n")
return
return string(dst)
}
// ExtractFieldPathAsString extracts the field from the given object

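A hedged usage sketch (not part of the vendored diff): the rewrite above pre-sizes a byte slice and uses strconv.AppendQuote instead of repeated fmt.Sprintf concatenation, but the output format is unchanged. The labels and package name below are illustrative assumptions.
package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fieldpath"
)

func printLabels() {
	labels := map[string]string{"app": "web", "tier": "frontend"}
	// Keys are emitted in sorted order, values are quoted, and there is no
	// trailing newline:
	//   app="web"
	//   tier="frontend"
	fmt.Println(fieldpath.FormatMap(labels))
}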
View File

@ -159,7 +159,7 @@ type KubeletConfiguration struct {
// enableDebuggingHandlers enables server endpoints for log collection
// and local running of containers and commands
EnableDebuggingHandlers bool
// enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true.
// enableContentionProfiling enables block profiling, if enableDebuggingHandlers is true.
EnableContentionProfiling bool
// healthzPort is the port of the localhost healthz endpoint (set to 0 to disable)
HealthzPort int32
@ -233,11 +233,9 @@ type KubeletConfiguration struct {
// Requires the MemoryManager feature gate to be enabled.
MemoryManagerPolicy string
// TopologyManagerPolicy is the name of the policy to use.
// Policies other than "none" require the TopologyManager feature gate to be enabled.
TopologyManagerPolicy string
// TopologyManagerScope represents the scope of topology hint generation
// that topology manager requests and hint providers generate.
// "pod" scope requires the TopologyManager feature gate to be enabled.
// Default: "container"
// +optional
TopologyManagerScope string
@ -292,6 +290,8 @@ type KubeletConfiguration struct {
KubeAPIBurst int32
// serializeImagePulls when enabled, tells the Kubelet to pull images one at a time.
SerializeImagePulls bool
// MaxParallelImagePulls sets the maximum number of image pulls in parallel.
MaxParallelImagePulls *int32
// Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}.
// Some default signals are Linux only: nodefs.inodesFree
EvictionHard map[string]string
@ -369,14 +369,14 @@ type KubeletConfiguration struct {
// See http://kubernetes.io/docs/user-guide/compute-resources for more detail.
KubeReserved map[string]string
// This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons.
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information.
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
SystemReservedCgroup string
// This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons.
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information.
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
KubeReservedCgroup string
// This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform.
// This flag accepts a list of options. Acceptable options are `pods`, `system-reserved` & `kube-reserved`.
// Refer to [Node Allocatable](https://github.com/kubernetes/design-proposals-archive/blob/main/node/node-allocatable.md) doc for more information.
// Refer to [Node Allocatable](https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) doc for more information.
EnforceNodeAllocatable []string
// This option specifies the cpu list reserved for the host level system threads and kubernetes related threads.
// This provide a "static" CPU list rather than the "dynamic" list by system-reserved and kube-reserved.
@ -393,6 +393,11 @@ type KubeletConfiguration struct {
Logging logsapi.LoggingConfiguration
// EnableSystemLogHandler enables /logs handler.
EnableSystemLogHandler bool
// EnableSystemLogQuery enables the node log query feature on the /logs endpoint.
// EnableSystemLogHandler has to be enabled in addition for this feature to work.
// +featureGate=NodeLogQuery
// +optional
EnableSystemLogQuery bool
// ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown.
// Defaults to 0 seconds.
// +featureGate=GracefulNodeShutdown
@ -438,7 +443,7 @@ type KubeletConfiguration struct {
// Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure
// while increasing will put less reclaim pressure.
// See https://kep.k8s.io/2570 for more details.
// Default: 0.8
// Default: 0.9
// +featureGate=MemoryQoS
// +optional
MemoryThrottlingFactor *float64
@ -450,6 +455,7 @@ type KubeletConfiguration struct {
// registerNode enables automatic registration with the apiserver.
// +optional
RegisterNode bool
// Tracing specifies the versioned configuration for OpenTelemetry tracing clients.
// See https://kep.k8s.io/2832 for more details.
// +featureGate=KubeletTracing
@ -465,6 +471,16 @@ type KubeletConfiguration struct {
// disabled. Once disabled, user should not set request/limit for container's ephemeral storage, or sizeLimit for emptyDir.
// +optional
LocalStorageCapacityIsolation bool
// ContainerRuntimeEndpoint is the endpoint of container runtime.
// Unix domain sockets are supported on Linux, while npipe and TCP endpoints are supported on Windows.
// Examples: 'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'
ContainerRuntimeEndpoint string
// ImageServiceEndpoint is the endpoint of container image service.
// If not specified, the default value is ContainerRuntimeEndpoint.
// +optional
ImageServiceEndpoint string
}
// KubeletAuthorizationMode denotes the authorization mode for the kubelet

View File

@ -227,6 +227,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
}
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
out.CPUCFSQuotaPeriod = in.CPUCFSQuotaPeriod
if in.MaxParallelImagePulls != nil {
in, out := &in.MaxParallelImagePulls, &out.MaxParallelImagePulls
*out = new(int32)
**out = **in
}
if in.EvictionHard != nil {
in, out := &in.EvictionHard, &out.EvictionHard
*out = make(map[string]string, len(*in))

View File

@ -18,7 +18,6 @@ package format
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -40,16 +39,3 @@ func PodDesc(podName, podNamespace string, podUID types.UID) string {
// (DNS subdomain format), while allowed in the container name format.
return fmt.Sprintf("%s_%s(%s)", podName, podNamespace, podUID)
}
// PodWithDeletionTimestamp is the same as Pod. In addition, it prints the
// deletion timestamp of the pod if it's not nil.
func PodWithDeletionTimestamp(pod *v1.Pod) string {
if pod == nil {
return "<nil>"
}
var deletionTimestamp string
if pod.DeletionTimestamp != nil {
deletionTimestamp = ":DeletionTimestamp=" + pod.DeletionTimestamp.UTC().Format(time.RFC3339)
}
return Pod(pod) + deletionTimestamp
}

View File

@ -1,36 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package format
import (
"fmt"
"sort"
"strings"
"k8s.io/api/core/v1"
)
// ResourceList returns a string representation of a resource list in a human readable format.
func ResourceList(resources v1.ResourceList) string {
resourceStrings := make([]string, 0, len(resources))
for key, value := range resources {
resourceStrings = append(resourceStrings, fmt.Sprintf("%v=%v", key, value.String()))
}
// sort the results for consistent log output
sort.Strings(resourceStrings)
return strings.Join(resourceStrings, ",")
}

View File

@ -0,0 +1,127 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"net"
"k8s.io/apimachinery/pkg/util/sets"
netutils "k8s.io/utils/net"
)
// NodePortAddresses is used to handle the --nodeport-addresses flag
type NodePortAddresses struct {
cidrStrings []string
cidrs []*net.IPNet
containsIPv4Loopback bool
}
// RFC 5735 127.0.0.0/8 - This block is assigned for use as the Internet host loopback address
var ipv4LoopbackStart = net.IPv4(127, 0, 0, 0)
// NewNodePortAddresses takes the `--nodeport-addresses` value (which is assumed to
// contain only valid CIDRs) and returns a NodePortAddresses object. If cidrStrings is
// empty, this is treated as `["0.0.0.0/0", "::/0"]`.
func NewNodePortAddresses(cidrStrings []string) *NodePortAddresses {
if len(cidrStrings) == 0 {
cidrStrings = []string{IPv4ZeroCIDR, IPv6ZeroCIDR}
}
npa := &NodePortAddresses{
cidrStrings: cidrStrings,
}
for _, str := range npa.cidrStrings {
_, cidr, _ := netutils.ParseCIDRSloppy(str)
npa.cidrs = append(npa.cidrs, cidr)
if netutils.IsIPv4CIDR(cidr) {
if cidr.IP.IsLoopback() || cidr.Contains(ipv4LoopbackStart) {
npa.containsIPv4Loopback = true
}
}
}
return npa
}
func (npa *NodePortAddresses) String() string {
return fmt.Sprintf("%v", npa.cidrStrings)
}
// GetNodeAddresses returns all matched node IP addresses for npa's CIDRs.
// If npa's CIDRs include "0.0.0.0/0" and/or "::/0", then those values will be returned
// verbatim in the response and no actual IPs of that family will be returned.
// If no matching IPs are found, GetNodeAddresses will return an error.
// NetworkInterfacer is injected for test purpose.
func (npa *NodePortAddresses) GetNodeAddresses(nw NetworkInterfacer) (sets.String, error) {
uniqueAddressList := sets.NewString()
// First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs.
for _, cidr := range npa.cidrStrings {
if IsZeroCIDR(cidr) {
uniqueAddressList.Insert(cidr)
}
}
addrs, err := nw.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("error listing all interfaceAddrs from host, error: %v", err)
}
// Second round of iteration to parse IPs based on cidr.
for _, cidr := range npa.cidrs {
if IsZeroCIDR(cidr.String()) {
continue
}
for _, addr := range addrs {
var ip net.IP
// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux.
switch v := addr.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
default:
continue
}
if cidr.Contains(ip) {
if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
}
}
if uniqueAddressList.Len() == 0 {
return nil, fmt.Errorf("no addresses found for cidrs %v", npa.cidrStrings)
}
return uniqueAddressList, nil
}
// ContainsIPv4Loopback returns true if npa's CIDRs contain an IPv4 loopback address.
func (npa *NodePortAddresses) ContainsIPv4Loopback() bool {
return npa.containsIPv4Loopback
}
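
A hedged usage sketch of the new NodePortAddresses type follows; RealNetwork is assumed to be this package's production NetworkInterfacer implementation, and the CIDR values are invented.

package main

import (
	"fmt"

	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
)

func main() {
	// An empty slice would be treated as ["0.0.0.0/0", "::/0"], per NewNodePortAddresses above.
	npa := utilproxy.NewNodePortAddresses([]string{"192.168.0.0/24", "127.0.0.1/32"})

	fmt.Println("contains IPv4 loopback:", npa.ContainsIPv4Loopback()) // true, because of 127.0.0.1/32

	// RealNetwork is an assumption; substitute the actual NetworkInterfacer if the name differs.
	addrs, err := npa.GetNodeAddresses(utilproxy.RealNetwork{})
	if err != nil {
		fmt.Println("no node addresses matched:", err)
		return
	}
	fmt.Println("node addresses:", addrs.List())
}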

View File

@ -78,37 +78,6 @@ func BuildPortsToEndpointsMap(endpoints *v1.Endpoints) map[string][]string {
return portsToEndpoints
}
// ContainsIPv4Loopback returns true if the input is empty or one of the CIDR contains an IPv4 loopback address.
func ContainsIPv4Loopback(cidrStrings []string) bool {
if len(cidrStrings) == 0 {
return true
}
// RFC 5735 127.0.0.0/8 - This block is assigned for use as the Internet host loopback address
ipv4LoopbackStart := netutils.ParseIPSloppy("127.0.0.0")
for _, cidr := range cidrStrings {
if IsZeroCIDR(cidr) {
return true
}
ip, ipnet, err := netutils.ParseCIDRSloppy(cidr)
if err != nil {
continue
}
if netutils.IsIPv6CIDR(ipnet) {
continue
}
if ip.IsLoopback() {
return true
}
if ipnet.Contains(ipv4LoopbackStart) {
return true
}
}
return false
}
// IsZeroCIDR checks whether the input CIDR string is either
// the IPv4 or IPv6 zero CIDR
func IsZeroCIDR(cidr string) bool {
@ -228,74 +197,10 @@ func ShouldSkipService(service *v1.Service) bool {
return false
}
// GetNodeAddresses return all matched node IP addresses based on given cidr slice.
// Some callers, e.g. IPVS proxier, need concrete IPs, not ranges, which is why this exists.
// NetworkInterfacer is injected for test purpose.
// We expect the cidrs passed in is already validated.
// Given an empty input `[]`, it will return `0.0.0.0/0` and `::/0` directly.
// If multiple cidrs is given, it will return the minimal IP sets, e.g. given input `[1.2.0.0/16, 0.0.0.0/0]`, it will
// only return `0.0.0.0/0`.
// NOTE: GetNodeAddresses only accepts CIDRs, if you want concrete IPs, e.g. 1.2.3.4, then the input should be 1.2.3.4/32.
func GetNodeAddresses(cidrs []string, nw NetworkInterfacer) (sets.String, error) {
uniqueAddressList := sets.NewString()
if len(cidrs) == 0 {
uniqueAddressList.Insert(IPv4ZeroCIDR)
uniqueAddressList.Insert(IPv6ZeroCIDR)
return uniqueAddressList, nil
}
// First round of iteration to pick out `0.0.0.0/0` or `::/0` for the sake of excluding non-zero IPs.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
uniqueAddressList.Insert(cidr)
}
}
addrs, err := nw.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("error listing all interfaceAddrs from host, error: %v", err)
}
// Second round of iteration to parse IPs based on cidr.
for _, cidr := range cidrs {
if IsZeroCIDR(cidr) {
continue
}
_, ipNet, _ := netutils.ParseCIDRSloppy(cidr)
for _, addr := range addrs {
var ip net.IP
// nw.InterfaceAddrs may return net.IPAddr or net.IPNet on windows, and it will return net.IPNet on linux.
switch v := addr.(type) {
case *net.IPAddr:
ip = v.IP
case *net.IPNet:
ip = v.IP
default:
continue
}
if ipNet.Contains(ip) {
if netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv6ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
if !netutils.IsIPv6(ip) && !uniqueAddressList.Has(IPv4ZeroCIDR) {
uniqueAddressList.Insert(ip.String())
}
}
}
}
if uniqueAddressList.Len() == 0 {
return nil, fmt.Errorf("no addresses found for cidrs %v", cidrs)
}
return uniqueAddressList, nil
}
// AddressSet validates the addresses in the slice using the "isValid" function.
// Addresses that pass the validation are returned as a string Set.
func AddressSet(isValid func(ip net.IP) bool, addrs []net.Addr) sets.String {
ips := sets.NewString()
func AddressSet(isValid func(ip net.IP) bool, addrs []net.Addr) sets.Set[string] {
ips := sets.New[string]()
for _, a := range addrs {
var ip net.IP
switch v := a.(type) {
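
The caller-visible part of this hunk is AddressSet's return type moving from sets.String to the generic sets.Set[string]; a minimal sketch of an adapted caller, with invented interface addresses:

package main

import (
	"fmt"
	"net"

	"k8s.io/apimachinery/pkg/util/sets"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	netutils "k8s.io/utils/net"
)

func main() {
	addrs := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy("10.0.0.1"), Mask: net.CIDRMask(24, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy("fd00::1"), Mask: net.CIDRMask(64, 128)},
	}
	// Keep only the IPv4 addresses; the result is now a sets.Set[string].
	ipv4 := utilproxy.AddressSet(func(ip net.IP) bool { return !netutils.IsIPv6(ip) }, addrs)
	// sets.List replaces the old sets.String List() method for sorted output.
	fmt.Println(sets.List(ipv4))
}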

View File

@ -31,6 +31,7 @@ type PodSecurityContextAccessor interface {
RunAsUser() *int64
RunAsGroup() *int64
RunAsNonRoot() *bool
SeccompProfile() *api.SeccompProfile
SupplementalGroups() []int64
FSGroup() *int64
}
@ -46,6 +47,7 @@ type PodSecurityContextMutator interface {
SetRunAsUser(*int64)
SetRunAsGroup(*int64)
SetRunAsNonRoot(*bool)
SetSeccompProfile(*api.SeccompProfile)
SetSupplementalGroups([]int64)
SetFSGroup(*int64)
@ -171,6 +173,19 @@ func (w *podSecurityContextWrapper) SetRunAsNonRoot(v *bool) {
w.ensurePodSC()
w.podSC.RunAsNonRoot = v
}
func (w *podSecurityContextWrapper) SeccompProfile() *api.SeccompProfile {
if w.podSC == nil {
return nil
}
return w.podSC.SeccompProfile
}
func (w *podSecurityContextWrapper) SetSeccompProfile(p *api.SeccompProfile) {
if w.podSC == nil && p == nil {
return
}
w.ensurePodSC()
w.podSC.SeccompProfile = p
}
func (w *podSecurityContextWrapper) SupplementalGroups() []int64 {
if w.podSC == nil {
return nil
@ -211,6 +226,7 @@ type ContainerSecurityContextAccessor interface {
RunAsGroup() *int64
RunAsNonRoot() *bool
ReadOnlyRootFilesystem() *bool
SeccompProfile() *api.SeccompProfile
AllowPrivilegeEscalation() *bool
}
@ -227,6 +243,7 @@ type ContainerSecurityContextMutator interface {
SetRunAsGroup(*int64)
SetRunAsNonRoot(*bool)
SetReadOnlyRootFilesystem(*bool)
SetSeccompProfile(*api.SeccompProfile)
SetAllowPrivilegeEscalation(*bool)
}
@ -357,6 +374,20 @@ func (w *containerSecurityContextWrapper) SetReadOnlyRootFilesystem(v *bool) {
w.ensureContainerSC()
w.containerSC.ReadOnlyRootFilesystem = v
}
func (w *containerSecurityContextWrapper) SeccompProfile() *api.SeccompProfile {
if w.containerSC == nil {
return nil
}
return w.containerSC.SeccompProfile
}
func (w *containerSecurityContextWrapper) SetSeccompProfile(p *api.SeccompProfile) {
if w.containerSC == nil && p == nil {
return
}
w.ensureContainerSC()
w.containerSC.SeccompProfile = p
}
func (w *containerSecurityContextWrapper) AllowPrivilegeEscalation() *bool {
if w.containerSC == nil {
return nil
@ -464,6 +495,14 @@ func (w *effectiveContainerSecurityContextWrapper) SetReadOnlyRootFilesystem(v *
w.containerSC.SetReadOnlyRootFilesystem(v)
}
}
func (w *effectiveContainerSecurityContextWrapper) SeccompProfile() *api.SeccompProfile {
return w.containerSC.SeccompProfile()
}
func (w *effectiveContainerSecurityContextWrapper) SetSeccompProfile(p *api.SeccompProfile) {
if !reflect.DeepEqual(w.SeccompProfile(), p) {
w.containerSC.SetSeccompProfile(p)
}
}
func (w *effectiveContainerSecurityContextWrapper) AllowPrivilegeEscalation() *bool {
return w.containerSC.AllowPrivilegeEscalation()
}
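
A hedged sketch of the new SeccompProfile accessor and mutator in use; the constructor name NewPodSecurityContextMutator is assumed to exist in this package and is not shown in the hunk above.

package main

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/securitycontext"
)

func main() {
	pod := &api.Pod{}

	// Wrap the (nil) pod-level security context; the wrapper lazily creates it
	// when a setter is first called, as ensurePodSC does above.
	sc := securitycontext.NewPodSecurityContextMutator(pod.Spec.SecurityContext)

	fmt.Println(sc.SeccompProfile()) // <nil> while no profile has been set

	sc.SetSeccompProfile(&api.SeccompProfile{Type: api.SeccompProfileTypeRuntimeDefault})
	pod.Spec.SecurityContext = sc.PodSecurityContext()

	fmt.Println(pod.Spec.SecurityContext.SeccompProfile.Type) // RuntimeDefault
}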

View File

@ -20,30 +20,6 @@ import (
v1 "k8s.io/api/core/v1"
)
// HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account
// the possibility of nils
func HasPrivilegedRequest(container *v1.Container) bool {
if container.SecurityContext == nil {
return false
}
if container.SecurityContext.Privileged == nil {
return false
}
return *container.SecurityContext.Privileged
}
// HasCapabilitiesRequest returns true if Adds or Drops are defined in the security context
// capabilities, taking into account nils
func HasCapabilitiesRequest(container *v1.Container) bool {
if container.SecurityContext == nil {
return false
}
if container.SecurityContext.Capabilities == nil {
return false
}
return len(container.SecurityContext.Capabilities.Add) > 0 || len(container.SecurityContext.Capabilities.Drop) > 0
}
// HasWindowsHostProcessRequest returns true if container should run as HostProcess container,
// taking into account nils
func HasWindowsHostProcessRequest(pod *v1.Pod, container *v1.Container) bool {
@ -212,9 +188,10 @@ func AddNoNewPrivileges(sc *v1.SecurityContext) bool {
var (
// These *must* be kept in sync with moby/moby.
// https://github.com/moby/moby/blob/master/oci/defaults.go#L116-L134
// https://github.com/moby/moby/blob/master/oci/defaults.go#L105-L123
// @jessfraz will watch changes to those files upstream.
defaultMaskedPaths = []string{
"/proc/asound",
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
@ -226,7 +203,6 @@ var (
"/sys/firmware",
}
defaultReadonlyPaths = []string{
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",

View File

@ -213,7 +213,7 @@ type DeletableVolumePlugin interface {
// NewDeleter creates a new volume.Deleter which knows how to delete this
// resource in accordance with the underlying storage provider after the
// volume's release from a claim
NewDeleter(spec *Spec) (Deleter, error)
NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error)
}
// ProvisionableVolumePlugin is an extended interface of VolumePlugin and is
@ -223,7 +223,7 @@ type ProvisionableVolumePlugin interface {
// NewProvisioner creates a new volume.Provisioner which knows how to
// create PersistentVolumes in accordance with the plugin's underlying
// storage provider
NewProvisioner(options VolumeOptions) (Provisioner, error)
NewProvisioner(logger klog.Logger, options VolumeOptions) (Provisioner, error)
}
// AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
@ -334,13 +334,6 @@ type KubeletVolumeHost interface {
WaitForCacheSync() error
// Returns hostutil.HostUtils
GetHostUtil() hostutil.HostUtils
// GetHostIDsForPod if the pod uses user namespaces, takes the uid and
// gid inside the container and returns the host UID and GID those are
// mapped to on the host. If containerUID/containerGID is nil, then it
// returns the host UID/GID for ID 0 inside the container.
// If the pod is not using user namespaces, as there is no mapping needed, the
// same containerUID and containerGID params are returned.
GetHostIDsForPod(pod *v1.Pod, containerUID, containerGID *int64) (hostUID, hostGID *int64, err error)
}
// AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use
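
The Deleter/Provisioner construction now receives a contextual klog.Logger; a hedged sketch of how an in-tree plugin's methods change shape (the examplePlugin type and package name are placeholders, not part of this diff):

package volumeplugin // illustrative package, not from this change

import (
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/volume"
)

// examplePlugin stands in for an in-tree plugin updated to the new interfaces.
type examplePlugin struct{}

// NewDeleter now takes the logger explicitly instead of relying on global klog calls.
func (p *examplePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
	logger.V(4).Info("constructing deleter", "volume", spec.Name())
	return nil, nil // a real plugin returns its deleter implementation here
}

// NewProvisioner follows the same pattern.
func (p *examplePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
	logger.V(4).Info("constructing provisioner", "pvc", klog.KObj(options.PVC))
	return nil, nil
}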

View File

@ -86,11 +86,16 @@ const (
// Write does an atomic projection of the given payload into the writer's target
// directory. Input paths must not begin with '..'.
// setPerms is an optional function that the caller can provide to set the
// permissions of the newly created files before they are published. The function is
// passed subPath, which is the name of the timestamped directory that was created
// under the target directory.
//
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns
// 2.  The current timestamped directory is detected by reading the data directory
//
// 2. The current timestamped directory is detected by reading the data directory
// symlink
//
// 3. The old version of the volume is walked to determine whether any
@ -98,13 +103,19 @@ const (
//
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update is required.
// 5.  A new timestamped dir is created
//
// 6. The payload is written to the new timestamped directory
// 7.  A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory
// 8.  The new data directory symlink is renamed to the data directory; rename is atomic
// 9.  Symlinks and directory for new user-visible files are created (if needed).
// 5. A new timestamped dir is created.
//
// 6. The payload is written to the new timestamped directory.
//
// 7. Permissions are set (if setPerms is not nil) on the new timestamped directory and files.
//
// 8. A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory.
//
// 9. The new data directory symlink is renamed to the data directory; rename is atomic.
//
// 10. Symlinks and directory for new user-visible files are created (if needed).
//
// For example, consider the files:
// <target-dir>/podName
@ -123,9 +134,10 @@ const (
// linking everything else. On Windows, if a target does not exist, the created symlink
// will not work properly if the target ends up being a directory.
//
// 10. Old paths are removed from the user-visible portion of the target directory
// 11.  The previous timestamped directory is removed, if it exists
func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// 11. Old paths are removed from the user-visible portion of the target directory.
//
// 12. The previous timestamped directory is removed, if it exists.
func (w *AtomicWriter) Write(payload map[string]FileProjection, setPerms func(subPath string) error) error {
// (1)
cleanPayload, err := validatePayload(payload)
if err != nil {
@ -185,6 +197,14 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
klog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
// (7)
if setPerms != nil {
if err := setPerms(tsDirName); err != nil {
klog.Errorf("%s: error applying ownership settings: %v", w.logContext, err)
return err
}
}
// (8)
newDataDirPath := filepath.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
os.RemoveAll(tsDir)
@ -192,7 +212,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
return err
}
// (8)
// (9)
if runtime.GOOS == "windows" {
os.Remove(dataDirPath)
err = os.Symlink(tsDirName, dataDirPath)
@ -207,19 +227,19 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
return err
}
// (9)
// (10)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
klog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
// (10)
// (11)
if err = w.removeUserVisiblePaths(pathsToRemove); err != nil {
klog.Errorf("%s: error removing old visible symlinks: %v", w.logContext, err)
return err
}
// (11)
// (12)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(oldTsPath); err != nil {
klog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)

View File

@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -14,6 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
package common
package policy // import "k8s.io/kubernetes/pkg/apis/policy"
// QuotaID is generic quota identifier.
// Data type based on quotactl(2).
type QuotaID int32
const (
// UnknownQuotaID -- cannot determine whether a quota is in force
UnknownQuotaID QuotaID = -1
// BadQuotaID -- Invalid quota
BadQuotaID QuotaID = 0
)

View File

@ -23,17 +23,6 @@ import (
"regexp"
)
// QuotaID is generic quota identifier.
// Data type based on quotactl(2).
type QuotaID int32
const (
// UnknownQuotaID -- cannot determine whether a quota is in force
UnknownQuotaID QuotaID = -1
// BadQuotaID -- Invalid quota
BadQuotaID QuotaID = 0
)
// QuotaType -- type of quota to be applied
type QuotaType int

View File

@ -164,6 +164,9 @@ func readProjectFiles(projects *os.File, projid *os.File) projectsList {
return projectsList{parseProjFile(projects, parseProject), parseProjFile(projid, parseProjid)}
}
// findAvailableQuota finds the next available quota ID, starting from common.FirstQuota.
// It returns an error if QuotaIDIsInUse fails while checking whether a candidate ID is in use;
// it searches at most maxUnusedQuotasToSearch (128) IDs.
func findAvailableQuota(path string, idMap map[common.QuotaID]bool) (common.QuotaID, error) {
unusedQuotasSearched := 0
for id := common.FirstQuota; true; id++ {
@ -187,13 +190,13 @@ func addDirToProject(path string, id common.QuotaID, list *projectsList) (common
idMap := make(map[common.QuotaID]bool)
for _, project := range list.projects {
if project.data == path {
if id != project.id {
if id != common.BadQuotaID && id != project.id {
return common.BadQuotaID, false, fmt.Errorf("attempt to reassign project ID for %s", path)
}
// Trying to reassign a directory to the project it's
// already in. Maybe this should be an error, but for
// now treat it as an idempotent operation
return id, false, nil
return project.id, false, nil
}
idMap[project.id] = true
}
@ -318,6 +321,7 @@ func writeProjectFiles(fProjects *os.File, fProjid *os.File, writeProjid bool, l
return fmt.Errorf("unable to write project files: %v", err)
}
// if ID is common.BadQuotaID, generate new project id if the dir is not in a project
func createProjectID(path string, ID common.QuotaID) (common.QuotaID, error) {
quotaIDLock.Lock()
defer quotaIDLock.Unlock()

View File

@ -23,10 +23,15 @@ import (
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
)
// Interface -- quota interface
type Interface interface {
// GetQuotaOnDir gets the quota ID (if any) that applies to
// this directory
GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error)
// Does the path provided support quotas, and if so, what types
SupportsQuotas(m mount.Interface, path string) (bool, error)
// Assign a quota (picked by the quota mechanism) to a path,

View File

@ -35,6 +35,9 @@ import (
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
)
// Pod -> External Pod UID
var podUidMap = make(map[types.UID]types.UID)
// Pod -> ID
var podQuotaMap = make(map[types.UID]common.QuotaID)
@ -214,7 +217,7 @@ func setQuotaOnDir(path string, id common.QuotaID, bytes int64) error {
return getApplier(path).SetQuotaOnDir(path, id, bytes)
}
func getQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
func GetQuotaOnDir(m mount.Interface, path string) (common.QuotaID, error) {
_, _, err := getFSInfo(m, path)
if err != nil {
return common.BadQuotaID, err
@ -235,7 +238,7 @@ func clearQuotaOnDir(m mount.Interface, path string) error {
if !supportsQuotas {
return nil
}
projid, err := getQuotaOnDir(m, path)
projid, err := GetQuotaOnDir(m, path)
if err == nil && projid != common.BadQuotaID {
// This means that we have a quota on the directory but
// we can't clear it. That's not good.
@ -304,7 +307,7 @@ func SupportsQuotas(m mount.Interface, path string) (bool, error) {
// AssignQuota chooses the quota ID based on the pod UID and path.
// If the pod UID is identical to another one known, it may (but presently
// doesn't) choose the same quota ID as other volumes in the pod.
func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck // SA4009 poduid is overwritten by design, see comment below
func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resource.Quantity) error { //nolint:staticcheck
if bytes == nil {
return fmt.Errorf("attempting to assign null quota to %s", path)
}
@ -314,20 +317,32 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
}
quotaLock.Lock()
defer quotaLock.Unlock()
// Current policy is to set individual quotas on each volumes.
// Current policy is to set individual quotas on each volume,
// for each new volume we generate a random UUID and we use that as
// the internal pod uid.
// From the fsquota point of view, each volume is attached to a
// single unique pod.
// If we decide later that we want to assign one quota for all
// volumes in a pod, we can simply remove this line of code.
// volumes in a pod, we can simply use the poduid parameter directly.
// If and when we decide permanently that we're going to adopt
// one quota per volume, we can rip all of the pod code out.
poduid = types.UID(uuid.NewUUID()) //nolint:staticcheck // SA4009 poduid is overwritten by design, see comment above
if pod, ok := dirPodMap[path]; ok && pod != poduid {
return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, pod, poduid)
externalPodUid := poduid
internalPodUid, ok := dirPodMap[path]
if ok {
if podUidMap[internalPodUid] != externalPodUid {
return fmt.Errorf("requesting quota on existing directory %s but different pod %s %s", path, podUidMap[internalPodUid], externalPodUid)
}
} else {
internalPodUid = types.UID(uuid.NewUUID())
}
oid, ok := podQuotaMap[poduid]
oid, ok := podQuotaMap[internalPodUid]
if ok {
if quotaSizeMap[oid] != ibytes {
return fmt.Errorf("requesting quota of different size: old %v new %v", quotaSizeMap[oid], bytes)
}
if _, ok := dirPodMap[path]; ok {
return nil
}
} else {
oid = common.BadQuotaID
}
@ -342,12 +357,13 @@ func AssignQuota(m mount.Interface, path string, poduid types.UID, bytes *resour
ibytes = -1
}
if err = setQuotaOnDir(path, id, ibytes); err == nil {
quotaPodMap[id] = poduid
quotaPodMap[id] = internalPodUid
quotaSizeMap[id] = ibytes
podQuotaMap[poduid] = id
podQuotaMap[internalPodUid] = id
dirQuotaMap[path] = id
dirPodMap[path] = poduid
podDirCountMap[poduid]++
dirPodMap[path] = internalPodUid
podUidMap[internalPodUid] = externalPodUid
podDirCountMap[internalPodUid]++
klog.V(4).Infof("Assigning quota ID %d (%d) to %s", id, ibytes, path)
return nil
}
@ -415,7 +431,7 @@ func ClearQuota(m mount.Interface, path string) error {
if !ok {
return fmt.Errorf("clearQuota: No quota available for %s", path)
}
projid, err := getQuotaOnDir(m, path)
projid, err := GetQuotaOnDir(m, path)
if err != nil {
// Log-and-continue instead of returning an error for now
// due to unspecified backwards compatibility concerns (a subject to revise)
@ -436,6 +452,7 @@ func ClearQuota(m mount.Interface, path string) error {
delete(quotaPodMap, podQuotaMap[poduid])
delete(podDirCountMap, poduid)
delete(podQuotaMap, poduid)
delete(podUidMap, poduid)
} else {
err = removeProjectID(path, projid)
podDirCountMap[poduid]--
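
For orientation, a hedged sketch of the two exported entry points touched here, AssignQuota and the newly exported GetQuotaOnDir; the mounter, path, and size are placeholders, and the calls only take effect on a Linux filesystem with project quotas enabled.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume/util/fsquota"
	"k8s.io/mount-utils"
)

func main() {
	mounter := mount.New("") // default mounter; an assumption for this sketch
	dir := "/var/lib/kubelet/pods/example/volumes/kubernetes.io~empty-dir/cache" // placeholder path
	limit := resource.NewQuantity(1<<30, resource.BinarySI)                      // 1Gi

	// The externally supplied pod UID is now only checked for consistency;
	// internally each volume gets its own generated UID, per the comment above.
	if err := fsquota.AssignQuota(mounter, dir, types.UID("external-pod-uid"), limit); err != nil {
		fmt.Println("quota not assigned:", err)
		return
	}

	id, err := fsquota.GetQuotaOnDir(mounter, dir)
	fmt.Println("quota id:", id, "err:", err)
}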

View File

@ -22,6 +22,7 @@ package fsquota
import (
"errors"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
"k8s.io/mount-utils"
"k8s.io/apimachinery/pkg/api/resource"
@ -33,6 +34,10 @@ import (
var errNotImplemented = errors.New("not implemented")
func GetQuotaOnDir(_ mount.Interface, _ string) (common.QuotaID, error) {
return common.BadQuotaID, errNotImplemented
}
// SupportsQuotas -- dummy implementation
func SupportsQuotas(_ mount.Interface, _ string) (bool, error) {
return false, errNotImplemented

View File

@ -120,5 +120,6 @@ func (hu *FakeHostUtil) GetMode(pathname string) (os.FileMode, error) {
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
// given mount point.
func (hu *FakeHostUtil) GetSELinuxMountContext(pathname string) (string, error) {
return "", errors.New("not implemented")
// This pretends the OS does not support SELinux.
return "", nil
}

View File

@ -229,23 +229,6 @@ func MarkFSResizeFinished(
return updatedPVC, err
}
func MarkControllerExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
expansionFailedOnController := v1.PersistentVolumeClaimControllerExpansionFailed
newPVC := pvc.DeepCopy()
newPVC.Status.ResizeStatus = &expansionFailedOnController
patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
if err != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if updateErr != nil {
return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr)
}
return updatedClaim, nil
}
// MarkNodeExpansionFailed marks a PVC for node expansion as failed. Kubelet should not retry expansion
// of volumes which are in failed state.
func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {

View File

@ -576,6 +576,44 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool {
volume.ConfigMap != nil
}
// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for
// local PersistentVolumes. nil is returned if the PV does not have any
// specific node affinity node selector terms and match expressions.
// A PersistentVolume with node affinity has node selector terms and match expressions
// in the form of:
//
// nodeAffinity:
// required:
// nodeSelectorTerms:
// - matchExpressions:
// - key: kubernetes.io/hostname
// operator: In
// values:
// - <node1>
// - <node2>
func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string {
if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
return nil
}
var result sets.Set[string]
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
var nodes sets.Set[string]
for _, matchExpr := range term.MatchExpressions {
if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn {
if nodes == nil {
nodes = sets.New(matchExpr.Values...)
} else {
nodes = nodes.Intersection(sets.New(matchExpr.Values...))
}
}
}
result = result.Union(nodes)
}
return sets.List(result)
}
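
A short hedged example of the new helper; the PV literal is invented. Node names from a single hostname expression are unioned across selector terms, while multiple hostname expressions within one term are intersected.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-local-pv"},
		Spec: v1.PersistentVolumeSpec{
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:      v1.LabelHostname,
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{"node-a", "node-b"},
						}},
					}},
				},
			},
		},
	}
	fmt.Println(volumeutil.GetLocalPersistentVolumeNodeNames(pv)) // [node-a node-b]
}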
// GetPodVolumeNames returns names of volumes that are used in a pod,
// either as filesystem mount or raw block device, together with list
// of all SELinux contexts of all containers that use the volumes.

View File

@ -139,7 +139,7 @@ func getLoopDeviceFromSysfs(path string) (string, error) {
}
// Return the first match.
backingFilePath := strings.TrimSpace(string(data))
backingFilePath := cleanBackingFilePath(string(data))
if backingFilePath == path || backingFilePath == realPath {
return fmt.Sprintf("/dev/%s", filepath.Base(device)), nil
}
@ -148,6 +148,14 @@ func getLoopDeviceFromSysfs(path string) (string, error) {
return "", errors.New(ErrDeviceNotFound)
}
// cleanBackingFilePath removes any trailing substrings that are not part of the backing file path.
func cleanBackingFilePath(path string) string {
// If the block device was deleted, the path will contain a "(deleted)" suffix
path = strings.TrimSpace(path)
path = strings.TrimSuffix(path, "(deleted)")
return strings.TrimSpace(path)
}
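
A tiny self-contained worked example of the trimming behavior (the path is invented); the helper below reproduces the logic of cleanBackingFilePath verbatim so the snippet runs standalone.

package main

import (
	"fmt"
	"strings"
)

// Same logic as cleanBackingFilePath above, copied so this example compiles on its own.
func cleanBackingFilePath(path string) string {
	path = strings.TrimSpace(path)
	path = strings.TrimSuffix(path, "(deleted)")
	return strings.TrimSpace(path)
}

func main() {
	// sysfs reports the backing file of a deleted loop target with a "(deleted)" suffix.
	fmt.Println(cleanBackingFilePath("/var/lib/kubelet/plugins/foo/disk.img (deleted)\n"))
	// Output: /var/lib/kubelet/plugins/foo/disk.img
}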
// FindGlobalMapPathUUIDFromPod finds {pod uuid} bind mount under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
// (See pkg/volume/volume.go for details on a global map path and a pod device map path.)

View File

@ -40,22 +40,22 @@ const (
// SetVolumeOwnership modifies the given volume to be owned by
// fsGroup, and sets SetGid so that newly created files are owned by
// fsGroup. If fsGroup is nil nothing is done.
func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
if fsGroup == nil {
return nil
}
timer := time.AfterFunc(30*time.Second, func() {
klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath())
klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", dir)
})
defer timer.Stop()
if skipPermissionChange(mounter, fsGroup, fsGroupChangePolicy) {
klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", mounter.GetPath())
if skipPermissionChange(mounter, dir, fsGroup, fsGroupChangePolicy) {
klog.V(3).InfoS("Skipping permission and ownership change for volume", "path", dir)
return nil
}
err := walkDeep(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
err := walkDeep(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
@ -104,14 +104,12 @@ func changeFilePermission(filename string, fsGroup *int64, readonly bool, info o
return nil
}
func skipPermissionChange(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {
dir := mounter.GetPath()
func skipPermissionChange(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) bool {
if fsGroupChangePolicy == nil || *fsGroupChangePolicy != v1.FSGroupChangeOnRootMismatch {
klog.V(4).InfoS("Perform recursive ownership change for directory", "path", dir)
return false
}
return !requiresPermissionChange(mounter.GetPath(), fsGroup, mounter.GetAttributes().ReadOnly)
return !requiresPermissionChange(dir, fsGroup, mounter.GetAttributes().ReadOnly)
}
func requiresPermissionChange(rootDir string, fsGroup *int64, readonly bool) bool {

View File

@ -24,6 +24,6 @@ import (
"k8s.io/kubernetes/pkg/volume/util/types"
)
func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
func SetVolumeOwnership(mounter Mounter, dir string, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy, completeFunc func(types.CompleteFuncParam)) error {
return nil
}

View File

@ -1,272 +1,34 @@
rules:
- selectorRegexp: k8s[.]io/kubernetes/pkg/
# The core E2E framework is meant to be a normal Kubernetes client,
# which means that it shouldn't depend on internal
# code. But we are not there yet, so some exceptions
# have to be allowed. Over time the list of allowed
# packages should get shorter, not longer.
- selectorRegexp: ^k8s[.]io/kubernetes/pkg/
allowedPrefixes:
- k8s.io/kubernetes/pkg/api/legacyscheme
- k8s.io/kubernetes/pkg/api/service
- k8s.io/kubernetes/pkg/api/v1/pod
- k8s.io/kubernetes/pkg/api/v1/resource
- k8s.io/kubernetes/pkg/api/v1/service
- k8s.io/kubernetes/pkg/api/pod
- k8s.io/kubernetes/pkg/api/node
- k8s.io/kubernetes/pkg/api/persistentvolumeclaim
- k8s.io/kubernetes/pkg/apis/apps
- k8s.io/kubernetes/pkg/apis/apps/validation
- k8s.io/kubernetes/pkg/apis/autoscaling
- k8s.io/kubernetes/pkg/apis/batch
- k8s.io/kubernetes/pkg/apis/certificates
- k8s.io/kubernetes/pkg/apis/certificates/v1
- k8s.io/kubernetes/pkg/apis/core
- k8s.io/kubernetes/pkg/apis/core/helper
- k8s.io/kubernetes/pkg/apis/core/install
- k8s.io/kubernetes/pkg/apis/core/pods
- k8s.io/kubernetes/pkg/apis/core/v1
- k8s.io/kubernetes/pkg/apis/core/v1/helper
- k8s.io/kubernetes/pkg/apis/core/v1/helper/qos
- k8s.io/kubernetes/pkg/apis/core/validation
- k8s.io/kubernetes/pkg/apis/extensions
- k8s.io/kubernetes/pkg/apis/networking
- k8s.io/kubernetes/pkg/apis/node
- k8s.io/kubernetes/pkg/apis/policy
- k8s.io/kubernetes/pkg/apis/policy/validation
- k8s.io/kubernetes/pkg/apis/scheduling
- k8s.io/kubernetes/pkg/apis/storage/v1/util
- k8s.io/kubernetes/pkg/capabilities
- k8s.io/kubernetes/pkg/client/conditions
- k8s.io/kubernetes/pkg/cloudprovider/providers
- k8s.io/kubernetes/pkg/controller
- k8s.io/kubernetes/pkg/controller/deployment/util
- k8s.io/kubernetes/pkg/controller/nodelifecycle
- k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
- k8s.io/kubernetes/pkg/credentialprovider/gcp
- k8s.io/kubernetes/pkg/credentialprovider/secrets
- k8s.io/kubernetes/pkg/features
- k8s.io/kubernetes/pkg/fieldpath
- k8s.io/kubernetes/pkg/kubectl
- k8s.io/kubernetes/pkg/kubectl/apps
- k8s.io/kubernetes/pkg/kubectl/describe
- k8s.io/kubernetes/pkg/kubectl/describe/versioned
- k8s.io/kubernetes/pkg/kubectl/scheme
- k8s.io/kubernetes/pkg/kubectl/util
- k8s.io/kubernetes/pkg/kubectl/util/certificate
- k8s.io/kubernetes/pkg/kubectl/util/deployment
- k8s.io/kubernetes/pkg/kubectl/util/event
- k8s.io/kubernetes/pkg/kubectl/util/fieldpath
- k8s.io/kubernetes/pkg/kubectl/util/podutils
- k8s.io/kubernetes/pkg/kubectl/util/qos
- k8s.io/kubernetes/pkg/kubectl/util/rbac
- k8s.io/kubernetes/pkg/kubectl/util/resource
- k8s.io/kubernetes/pkg/kubectl/util/slice
- k8s.io/kubernetes/pkg/kubectl/util/storage
- k8s.io/kubernetes/pkg/kubelet
- k8s.io/kubernetes/pkg/kubelet/apis
- k8s.io/kubernetes/pkg/kubelet/apis/config
- k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1
- k8s.io/kubernetes/pkg/kubelet/cadvisor
- k8s.io/kubernetes/pkg/kubelet/certificate
- k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap
- k8s.io/kubernetes/pkg/kubelet/checkpoint
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum
- k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors
- k8s.io/kubernetes/pkg/kubelet/cloudresource
- k8s.io/kubernetes/pkg/kubelet/cm
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/containermap
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state
- k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology
- k8s.io/kubernetes/pkg/kubelet/cm/cpuset
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager
- k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/checkpoint
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager
- k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask
- k8s.io/kubernetes/pkg/kubelet/cm/util
- k8s.io/kubernetes/pkg/kubelet/config
- k8s.io/kubernetes/pkg/kubelet/configmap
- k8s.io/kubernetes/pkg/kubelet/container
- k8s.io/kubernetes/pkg/kubelet/envvars
- k8s.io/kubernetes/pkg/kubelet/eviction
- k8s.io/kubernetes/pkg/kubelet/eviction/api
- k8s.io/kubernetes/pkg/kubelet/events
- k8s.io/kubernetes/pkg/kubelet/images
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log
- k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic
- k8s.io/kubernetes/pkg/kubelet/kuberuntime
- k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs
- k8s.io/kubernetes/pkg/kubelet/leaky
- k8s.io/kubernetes/pkg/kubelet/lifecycle
- k8s.io/kubernetes/pkg/kubelet/logs
- k8s.io/kubernetes/pkg/kubelet/metrics
- k8s.io/kubernetes/pkg/kubelet/network/dns
- k8s.io/kubernetes/pkg/kubelet/nodelease
- k8s.io/kubernetes/pkg/kubelet/nodestatus
- k8s.io/kubernetes/pkg/kubelet/oom
- k8s.io/kubernetes/pkg/kubelet/pleg
- k8s.io/kubernetes/pkg/kubelet/pluginmanager
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/metrics
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2
- k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler
- k8s.io/kubernetes/pkg/kubelet/pod
- k8s.io/kubernetes/pkg/kubelet/preemption
- k8s.io/kubernetes/pkg/kubelet/prober
- k8s.io/kubernetes/pkg/kubelet/prober/results
- k8s.io/kubernetes/pkg/kubelet/qos
- k8s.io/kubernetes/pkg/kubelet/remote
- k8s.io/kubernetes/pkg/kubelet/runtimeclass
- k8s.io/kubernetes/pkg/kubelet/server
- k8s.io/kubernetes/pkg/kubelet/server/metrics
- k8s.io/kubernetes/pkg/kubelet/server/portforward
- k8s.io/kubernetes/pkg/kubelet/server/remotecommand
- k8s.io/kubernetes/pkg/kubelet/server/stats
- k8s.io/kubernetes/pkg/kubelet/server/streaming
- k8s.io/kubernetes/pkg/kubelet/stats
- k8s.io/kubernetes/pkg/kubelet/stats/pidlimit
- k8s.io/kubernetes/pkg/kubelet/status
- k8s.io/kubernetes/pkg/kubelet/secret
- k8s.io/kubernetes/pkg/kubelet/sysctl
- k8s.io/kubernetes/pkg/kubelet/types
- k8s.io/kubernetes/pkg/kubelet/token
- k8s.io/kubernetes/pkg/kubelet/util
- k8s.io/kubernetes/pkg/kubelet/util/format
- k8s.io/kubernetes/pkg/kubelet/util/manager
- k8s.io/kubernetes/pkg/kubelet/util/store
- k8s.io/kubernetes/pkg/kubelet/volumemanager
- k8s.io/kubernetes/pkg/kubelet/volumemanager/cache
- k8s.io/kubernetes/pkg/kubelet/volumemanager/metrics
- k8s.io/kubernetes/pkg/kubelet/volumemanager/populator
- k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler
- k8s.io/kubernetes/pkg/kubemark
- k8s.io/kubernetes/pkg/cluster/ports
- k8s.io/kubernetes/pkg/probe
- k8s.io/kubernetes/pkg/probe/exec
- k8s.io/kubernetes/pkg/probe/http
- k8s.io/kubernetes/pkg/probe/tcp
- k8s.io/kubernetes/pkg/proxy
- k8s.io/kubernetes/pkg/proxy/apis
- k8s.io/kubernetes/pkg/proxy/apis/config
- k8s.io/kubernetes/pkg/proxy/apis/config/scheme
- k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1
- k8s.io/kubernetes/pkg/proxy/apis/config/validation
- k8s.io/kubernetes/pkg/proxy/config
- k8s.io/kubernetes/pkg/proxy/healthcheck
- k8s.io/kubernetes/pkg/proxy/iptables
- k8s.io/kubernetes/pkg/proxy/ipvs
- k8s.io/kubernetes/pkg/proxy/metaproxier
- k8s.io/kubernetes/pkg/proxy/metrics
- k8s.io/kubernetes/pkg/proxy/util
- k8s.io/kubernetes/pkg/registry/core/service/allocator
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
- k8s.io/kubernetes/pkg/scheduler/api
- k8s.io/kubernetes/pkg/scheduler/framework
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports
- k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources
- k8s.io/kubernetes/pkg/scheduler/framework/runtime
- k8s.io/kubernetes/pkg/scheduler/internal/heap
- k8s.io/kubernetes/pkg/scheduler/internal/parallelize
- k8s.io/kubernetes/pkg/scheduler/internal/queue
- k8s.io/kubernetes/pkg/scheduler/listers
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/scheduler/metrics
- k8s.io/kubernetes/pkg/scheduler/nodeinfo
- k8s.io/kubernetes/pkg/scheduler/util
- k8s.io/kubernetes/pkg/scheduler/volumebinder
- k8s.io/kubernetes/pkg/scheduler
- k8s.io/kubernetes/pkg/scheduler/profile
- k8s.io/kubernetes/pkg/scheduler/testing
- k8s.io/kubernetes/pkg/security/apparmor
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl
- k8s.io/kubernetes/pkg/security/podsecuritypolicy/util
- k8s.io/kubernetes/pkg/securitycontext
- k8s.io/kubernetes/pkg/serviceaccount
- k8s.io/kubernetes/pkg/util/async
- k8s.io/kubernetes/pkg/util/bandwidth
- k8s.io/kubernetes/pkg/util/config
- k8s.io/kubernetes/pkg/util/configz
- k8s.io/kubernetes/pkg/util/conntrack
- k8s.io/kubernetes/pkg/util/env
- k8s.io/kubernetes/pkg/util/filesystem
- k8s.io/kubernetes/pkg/util/flag
- k8s.io/kubernetes/pkg/util/flock
- k8s.io/kubernetes/pkg/util/goroutinemap
- k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff
- k8s.io/kubernetes/pkg/util/hash
- k8s.io/kubernetes/pkg/util/ipset
- k8s.io/kubernetes/pkg/util/iptables
- k8s.io/kubernetes/pkg/util/ipvs
- k8s.io/kubernetes/pkg/util/labels
- k8s.io/kubernetes/pkg/util/node
- k8s.io/kubernetes/pkg/util/oom
- k8s.io/kubernetes/pkg/util/parsers
- k8s.io/kubernetes/pkg/util/pod
- k8s.io/kubernetes/pkg/util/procfs
- k8s.io/kubernetes/pkg/util/removeall
- k8s.io/kubernetes/pkg/util/resizefs
- k8s.io/kubernetes/pkg/util/rlimit
- k8s.io/kubernetes/pkg/util/selinux
- k8s.io/kubernetes/pkg/util/slice
- k8s.io/kubernetes/pkg/util/sysctl
- k8s.io/kubernetes/pkg/util/system
- k8s.io/kubernetes/pkg/util/tail
- k8s.io/kubernetes/pkg/util/taints
- k8s.io/kubernetes/pkg/volume
- k8s.io/kubernetes/pkg/volume/util
- k8s.io/kubernetes/pkg/volume/util/fs
- k8s.io/kubernetes/pkg/volume/util/fsquota
- k8s.io/kubernetes/pkg/volume/util/recyclerclient
- k8s.io/kubernetes/pkg/volume/util/subpath
- k8s.io/kubernetes/pkg/volume/util/types
- k8s.io/kubernetes/pkg/volume/util/volumepathhandler
# TODO: I have no idea why import-boss --include-test-files is yelling about these for k8s.io/kubernetes/test/e2e/framework/providers/kubemark
- k8s.io/kubernetes/pkg/apis/authentication
- k8s.io/kubernetes/pkg/apis/authentication/v1
- k8s.io/kubernetes/pkg/apis/certificates/v1beta1
- k8s.io/kubernetes/pkg/apis/storage/v1
- k8s.io/kubernetes/pkg/scheduler/internal/cache
- selectorRegexp: k8s[.]io/kubernetes/test/
allowedPrefixes:
- k8s.io/kubernetes/test/e2e/common
- k8s.io/kubernetes/test/e2e/framework
- k8s.io/kubernetes/test/e2e/framework/auth
- k8s.io/kubernetes/test/e2e/framework/ginkgowrapper
- k8s.io/kubernetes/test/e2e/framework/kubectl
- k8s.io/kubernetes/test/e2e/framework/log
- k8s.io/kubernetes/test/e2e/framework/metrics
- k8s.io/kubernetes/test/e2e/framework/network
- k8s.io/kubernetes/test/e2e/framework/node
- k8s.io/kubernetes/test/e2e/framework/pod
- k8s.io/kubernetes/test/e2e/framework/rc
- k8s.io/kubernetes/test/e2e/framework/resource
- k8s.io/kubernetes/test/e2e/framework/service
- k8s.io/kubernetes/test/e2e/framework/ssh
- k8s.io/kubernetes/test/e2e/framework/testfiles
- k8s.io/kubernetes/test/e2e/framework/websocket
- k8s.io/kubernetes/test/e2e/manifest
- k8s.io/kubernetes/test/e2e/perftype
- k8s.io/kubernetes/test/e2e/storage/utils
- k8s.io/kubernetes/test/e2e/system
- k8s.io/kubernetes/test/utils
- k8s.io/kubernetes/test/utils/image
# TODO: why is this here?
- selectorRegexp: k8s[.]io/kubernetes/third_party/
allowedPrefixes:
- k8s.io/kubernetes/third_party/forked/golang/expansion
- k8s.io/kubernetes/pkg/kubelet/apis/
# The following packages are okay to use:
#
# public API
- selectorRegexp: ^k8s[.]io/(api|apimachinery|client-go|component-base|klog|pod-security-admission|utils)/|^[a-z]+(/|$)|github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# stdlib
- selectorRegexp: ^[a-z]+(/|$)
allowedPrefixes: [ "" ]
# Ginkgo + Gomega.
- selectorRegexp: github.com/onsi/(ginkgo|gomega)|^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# some of the shared test helpers (but not E2E sub-packages!)
- selectorRegexp: ^k8s[.]io/kubernetes/test/(e2e/framework/internal/|utils)
allowedPrefixes: [ "" ]
# Everything else isn't.
#
# In particular importing any test/e2e/framework/* package would be a
# violation (sub-packages get to use the framework, not the other way
# around).
- selectorRegexp: .

View File

@ -1,7 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-testing-approvers
- andrewsykim
- fabriziopandini
- pohly

View File

@ -57,7 +57,7 @@ ginkgo.AfterEach(func() {
# Do something with f.ClientSet.
}
ginkgo.It("test something", func() {
ginkgo.It("test something", func(ctx context.Context) {
# The actual test.
})
```

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -58,25 +58,25 @@ func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
}
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
func DumpAllNamespaceInfo(ctx context.Context, c clientset.Interface, namespace string) {
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(context.TODO(), opts)
return c.CoreV1().Events(ns).List(ctx, opts)
}, namespace)
e2epod.DumpAllPodInfoForNamespace(c, namespace, framework.TestContext.ReportDir)
e2epod.DumpAllPodInfoForNamespace(ctx, c, namespace, framework.TestContext.ReportDir)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := framework.TestContext.MaxNodesToGather
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("unable to fetch node list: %v", err)
return
}
if len(nodes.Items) <= maxNodesForDump {
dumpAllNodeInfo(c, nodes)
dumpAllNodeInfo(ctx, c, nodes)
} else {
framework.Logf("skipping dumping cluster info - cluster too large")
}
@ -95,31 +95,31 @@ func (o byFirstTimestamp) Less(i, j int) bool {
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
func dumpAllNodeInfo(ctx context.Context, c clientset.Interface, nodes *v1.NodeList) {
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, framework.Logf)
DumpNodeDebugInfo(ctx, c, names, framework.Logf)
}
// DumpNodeDebugInfo dumps debug information of the given nodes.
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
func DumpNodeDebugInfo(ctx context.Context, c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(ctx, n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
for _, e := range getNodeEvents(ctx, c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := getKubeletPods(c, n)
podList, err := getKubeletPods(ctx, c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
@ -135,13 +135,14 @@ func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(f
c.Name, c.Ready, c.RestartCount)
}
}
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
_, err = e2emetrics.HighLatencyKubeletOperations(ctx, c, 10*time.Second, n, logFunc)
framework.ExpectNoError(err)
// TODO: Log node resource info
}
}
// getKubeletPods retrieves the list of pods on the kubelet.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
func getKubeletPods(ctx context.Context, c clientset.Interface, node string) (*v1.PodList, error) {
var client restclient.Result
finished := make(chan struct{}, 1)
go func() {
@ -151,7 +152,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, framework.KubeletPort)).
Suffix("pods").
Do(context.TODO())
Do(ctx)
finished <- struct{}{}
}()
@ -170,7 +171,7 @@ func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
// getNodeEvents retrieves kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
func getNodeEvents(ctx context.Context, c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
@ -178,7 +179,7 @@ func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(ctx, options)
if err != nil {
framework.Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}

View File

@ -18,6 +18,7 @@ package debug
import (
"bytes"
"context"
"fmt"
"strconv"
"strings"
@ -156,8 +157,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
}
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(c)
func NewLogsVerifier(ctx context.Context, c clientset.Interface) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(ctx, c)
framework.ExpectNoError(err)
instanceAddress := framework.APIAddress() + ":22"
@ -166,7 +167,6 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier := &LogsSizeVerifier{
client: c,
stopChannel: stopChannel,
data: prepareData(instanceAddress, nodeAddresses),
masterAddress: instanceAddress,
nodeAddresses: nodeAddresses,
@ -177,7 +177,6 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier.wg.Add(workersNo)
for i := 0; i < workersNo; i++ {
workers[i] = &LogSizeGatherer{
stopChannel: stopChannel,
data: verifier.data,
wg: &verifier.wg,
workChannel: workChannel,
@ -207,7 +206,7 @@ func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
}
// Run starts log size gathering. It starts a goroutine for every worker and then blocks until stopChannel is closed
func (s *LogsSizeVerifier) Run() {
func (s *LogsSizeVerifier) Run(ctx context.Context) {
s.workChannel <- WorkItem{
ip: s.masterAddress,
paths: masterLogsToCheck,
@ -221,15 +220,15 @@ func (s *LogsSizeVerifier) Run() {
}
}
for _, worker := range s.workers {
go worker.Run()
go worker.Run(ctx)
}
<-s.stopChannel
s.wg.Wait()
}
// Run starts log size gathering.
func (g *LogSizeGatherer) Run() {
for g.Work() {
func (g *LogSizeGatherer) Run(ctx context.Context) {
for g.Work(ctx) {
}
}
@ -245,7 +244,7 @@ func (g *LogSizeGatherer) pushWorkItem(workItem WorkItem) {
// Work does a single unit of work: tries to take out a WorkItem from the queue, ssh-es into a given machine,
// gathers data, writes it to the shared <data> map, and creates a goroutine which reinserts the work item into
// the queue with a <pollingPeriod> delay. Returns false if worker should exit.
func (g *LogSizeGatherer) Work() bool {
func (g *LogSizeGatherer) Work(ctx context.Context) bool {
var workItem WorkItem
select {
case <-g.stopChannel:
@ -254,6 +253,7 @@ func (g *LogSizeGatherer) Work() bool {
case workItem = <-g.workChannel:
}
sshResult, err := e2essh.SSH(
ctx,
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
workItem.ip,
framework.TestContext.Provider,

View File

@ -181,10 +181,10 @@ type resourceGatherWorker struct {
printVerboseLogs bool
}
func (w *resourceGatherWorker) singleProbe() {
func (w *resourceGatherWorker) singleProbe(ctx context.Context) {
data := make(ResourceUsagePerContainer)
if w.inKubemark {
kubemarkData := getKubemarkMasterComponentsResourceUsage()
kubemarkData := getKubemarkMasterComponentsResourceUsage(ctx)
if kubemarkData == nil {
return
}
@ -319,22 +319,26 @@ func removeUint64Ptr(ptr *uint64) uint64 {
return *ptr
}
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
func (w *resourceGatherWorker) gather(ctx context.Context, initialSleep time.Duration) {
defer utilruntime.HandleCrash()
defer w.wg.Done()
defer framework.Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }()
select {
case <-time.After(initialSleep):
w.singleProbe()
w.singleProbe(ctx)
for {
select {
case <-time.After(w.resourceDataGatheringPeriod):
w.singleProbe()
w.singleProbe(ctx)
case <-ctx.Done():
return
case <-w.stopCh:
return
}
}
case <-ctx.Done():
return
case <-w.stopCh:
return
}
@ -373,11 +377,11 @@ const (
// nodeHasControlPlanePods returns true if specified node has control plane pods
// (kube-scheduler and/or kube-controller-manager).
func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) {
func nodeHasControlPlanePods(ctx context.Context, c clientset.Interface, nodeName string) (bool, error) {
regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
})
if err != nil {
@ -395,7 +399,7 @@ func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, erro
}
// NewResourceUsageGatherer returns a new ContainerResourceGatherer.
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
func NewResourceUsageGatherer(ctx context.Context, c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
g := ContainerResourceGatherer{
client: c,
stopCh: make(chan struct{}),
@ -420,7 +424,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// Tracks kube-system pods if no valid PodList is passed in.
var err error
if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
pods, err = c.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("Error while listing Pods: %v", err)
return nil, err
@ -429,7 +433,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
dnsNodes := make(map[string]bool)
for _, pod := range pods.Items {
if options.Nodes == MasterNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
isControlPlane, err := nodeHasControlPlanePods(ctx, c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
@ -438,7 +442,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
}
}
if options.Nodes == MasterAndDNSNodes {
isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
isControlPlane, err := nodeHasControlPlanePods(ctx, c, pod.Spec.NodeName)
if err != nil {
return nil, err
}
@ -456,14 +460,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
dnsNodes[pod.Spec.NodeName] = true
}
}
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodeList, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("Error while listing Nodes: %v", err)
return nil, err
}
for _, node := range nodeList.Items {
isControlPlane, err := nodeHasControlPlanePods(c, node.Name)
isControlPlane, err := nodeHasControlPlanePods(ctx, c, node.Name)
if err != nil {
return nil, err
}
@ -491,14 +495,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// StartGatheringData starts a stat-gathering worker for each node to track,
// and blocks until StopAndSummarize is called.
func (g *ContainerResourceGatherer) StartGatheringData() {
func (g *ContainerResourceGatherer) StartGatheringData(ctx context.Context) {
if len(g.workers) == 0 {
return
}
delayPeriod := g.options.ResourceDataGatheringPeriod / time.Duration(len(g.workers))
delay := time.Duration(0)
for i := range g.workers {
go g.workers[i].gather(delay)
go g.workers[i].gather(ctx, delay)
delay += delayPeriod
}
g.workerWg.Wait()
@ -603,8 +607,8 @@ type kubemarkResourceUsage struct {
CPUUsageInCores float64
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
func getMasterUsageByPrefix(ctx context.Context, prefix string) (string, error) {
sshResult, err := e2essh.SSH(ctx, fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
if err != nil {
return "", err
}
@ -612,10 +616,10 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
}
// getKubemarkMasterComponentsResourceUsage returns the CPU and memory usage of the kubemark master components, keyed by pod name.
func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsage {
func getKubemarkMasterComponentsResourceUsage(ctx context.Context) map[string]*kubemarkResourceUsage {
result := make(map[string]*kubemarkResourceUsage)
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
sshResult, err := getMasterUsageByPrefix(ctx, "kube")
if err != nil {
framework.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
return nil
@ -633,7 +637,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
}
}
// Get etcd resource usage
sshResult, err = getMasterUsageByPrefix("bin/etcd")
sshResult, err = getMasterUsageByPrefix(ctx, "bin/etcd")
if err != nil {
framework.Logf("Error when trying to SSH to master machine. Skipping probe")
return nil

View File

@ -17,23 +17,300 @@ limitations under the License.
package framework
import (
"context"
"errors"
"fmt"
"strings"
"time"
ginkgotypes "github.com/onsi/ginkgo/v2/types"
"github.com/onsi/gomega"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
// MakeMatcher builds a gomega.Matcher based on a single callback function.
// That function is passed the actual value that is to be checked.
// There are three possible outcomes of the check:
// - An error is returned, which then is converted into a failure
// by Gomega.
// - A non-nil failure function is returned, which then is called
// by Gomega once a failure string is needed. This is useful
// to avoid unnecessarily preparing a failure string for intermediate
// failures in Eventually or Consistently.
// - Both function and error are nil, which means that the check
// succeeded.
func MakeMatcher[T interface{}](match func(actual T) (failure func() string, err error)) types.GomegaMatcher {
return &matcher[T]{
match: match,
}
}
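
As a hedged illustration of the three outcomes described above, the sketch below builds a custom pod-phase matcher on top of MakeMatcher. The package name, the havePhase helper, and its message text are assumptions for this example and are not part of the framework.

package e2esketch // hypothetical package for this sketch

import (
	"fmt"

	"github.com/onsi/gomega/types"
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// havePhase succeeds when a *v1.Pod is in the expected phase.
func havePhase(expected v1.PodPhase) types.GomegaMatcher {
	return framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
		if pod == nil {
			// Outcome 1: a returned error is converted into a failure by Gomega.
			return nil, fmt.Errorf("expected a *v1.Pod, got nil")
		}
		if pod.Status.Phase != expected {
			// Outcome 2: a failure function, only invoked when Gomega needs the text.
			return func() string {
				return fmt.Sprintf("pod %q is in phase %q, expected %q", pod.Name, pod.Status.Phase, expected)
			}, nil
		}
		// Outcome 3: both nil, the check succeeded.
		return nil, nil
	})
}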
type matcher[T interface{}] struct {
match func(actual T) (func() string, error)
failure func() string
}
func (m *matcher[T]) Match(actual interface{}) (success bool, err error) {
if actual, ok := actual.(T); ok {
failure, err := m.match(actual)
if err != nil {
return false, err
}
m.failure = failure
if failure != nil {
return false, nil
}
return true, nil
}
var empty T
return false, gomega.StopTrying(fmt.Sprintf("internal error: expected %T, got:\n%s", empty, format.Object(actual, 1)))
}
func (m *matcher[T]) FailureMessage(actual interface{}) string {
return m.failure()
}
func (m matcher[T]) NegatedFailureMessage(actual interface{}) string {
return m.failure()
}
var _ types.GomegaMatcher = &matcher[string]{}
// Gomega returns an interface that can be used like gomega to express
// assertions. The difference is that failed assertions are returned as an
// error:
//
// if err := Gomega().Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning)); err != nil {
// return fmt.Errorf("test pod not running: %w", err)
// }
//
// This error can get wrapped to provide additional context for the
// failure. The test then should use ExpectNoError to turn a non-nil error into
// a failure.
//
// When using this approach, there is no need for call offsets and extra
// descriptions for the Expect call because the call stack will be dumped when
// ExpectNoError is called and the additional description(s) can be added by
// wrapping the error.
//
// Asynchronous assertions use the framework's Poll interval and PodStart timeout
// by default.
func Gomega() GomegaInstance {
return gomegaInstance{}
}
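
A minimal sketch of the pattern described in the comment above, assuming a pod lookup helper that is not part of this file: the helper returns the failed assertion as a wrapped error, and the caller turns it into a failure with ExpectNoError.

package e2esketch // hypothetical package for this sketch

import (
	"context"
	"fmt"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkPodRunning returns a failed assertion as an error instead of failing directly.
func checkPodRunning(ctx context.Context, c clientset.Interface, ns, name string) error {
	pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting pod %s/%s: %w", ns, name, err)
	}
	if err := framework.Gomega().Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning)); err != nil {
		return fmt.Errorf("test pod not running: %w", err)
	}
	return nil
}

// Inside the test body the error is then turned into a failure:
//
//	framework.ExpectNoError(checkPodRunning(ctx, f.ClientSet, f.Namespace.Name, "test-pod"))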
type GomegaInstance interface {
Expect(actual interface{}) Assertion
Eventually(ctx context.Context, args ...interface{}) AsyncAssertion
Consistently(ctx context.Context, args ...interface{}) AsyncAssertion
}
type Assertion interface {
Should(matcher types.GomegaMatcher) error
ShouldNot(matcher types.GomegaMatcher) error
To(matcher types.GomegaMatcher) error
ToNot(matcher types.GomegaMatcher) error
NotTo(matcher types.GomegaMatcher) error
}
type AsyncAssertion interface {
Should(matcher types.GomegaMatcher) error
ShouldNot(matcher types.GomegaMatcher) error
WithTimeout(interval time.Duration) AsyncAssertion
WithPolling(interval time.Duration) AsyncAssertion
}
type gomegaInstance struct{}
var _ GomegaInstance = gomegaInstance{}
func (g gomegaInstance) Expect(actual interface{}) Assertion {
return assertion{actual: actual}
}
func (g gomegaInstance) Eventually(ctx context.Context, args ...interface{}) AsyncAssertion {
return newAsyncAssertion(ctx, args, false)
}
func (g gomegaInstance) Consistently(ctx context.Context, args ...interface{}) AsyncAssertion {
return newAsyncAssertion(ctx, args, true)
}
func newG() (*FailureError, gomega.Gomega) {
var failure FailureError
g := gomega.NewGomega(func(msg string, callerSkip ...int) {
failure = FailureError{
msg: msg,
}
})
return &failure, g
}
type assertion struct {
actual interface{}
}
func (a assertion) Should(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).Should(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) ShouldNot(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).ShouldNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) To(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).To(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) ToNot(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).ToNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) NotTo(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).NotTo(matcher) {
err.backtrace()
return *err
}
return nil
}
type asyncAssertion struct {
ctx context.Context
args []interface{}
timeout time.Duration
interval time.Duration
consistently bool
}
func newAsyncAssertion(ctx context.Context, args []interface{}, consistently bool) asyncAssertion {
return asyncAssertion{
ctx: ctx,
args: args,
// PodStart is used as default because waiting for a pod is the
// most common operation.
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
}
}
func (a asyncAssertion) newAsync() (*FailureError, gomega.AsyncAssertion) {
err, g := newG()
var assertion gomega.AsyncAssertion
if a.consistently {
assertion = g.Consistently(a.ctx, a.args...)
} else {
assertion = g.Eventually(a.ctx, a.args...)
}
assertion = assertion.WithTimeout(a.timeout).WithPolling(a.interval)
return err, assertion
}
func (a asyncAssertion) Should(matcher types.GomegaMatcher) error {
err, assertion := a.newAsync()
if !assertion.Should(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a asyncAssertion) ShouldNot(matcher types.GomegaMatcher) error {
err, assertion := a.newAsync()
if !assertion.ShouldNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a asyncAssertion) WithTimeout(timeout time.Duration) AsyncAssertion {
a.timeout = timeout
return a
}
func (a asyncAssertion) WithPolling(interval time.Duration) AsyncAssertion {
a.interval = interval
return a
}
// FailureError is an error where the error string is meant to be passed to
// ginkgo.Fail directly, i.e. adding some prefix like "unexpected error" is not
// necessary. It is also not necessary to dump the error struct.
type FailureError struct {
msg string
fullStackTrace string
}
func (f FailureError) Error() string {
return f.msg
}
func (f FailureError) Backtrace() string {
return f.fullStackTrace
}
func (f FailureError) Is(target error) bool {
return target == ErrFailure
}
func (f *FailureError) backtrace() {
f.fullStackTrace = ginkgotypes.NewCodeLocationWithStackTrace(2).FullStackTrace
}
// ErrFailure is an empty error that can be wrapped to indicate that an error
// is a FailureError. It can also be used to test for a FailureError:
//
// return fmt.Errorf("some problem: %w", ErrFailure)
// ...
// err := someOperation()
// if errors.Is(err, ErrFailure) {
// ...
// }
var ErrFailure error = FailureError{}
// ExpectEqual expects the two specified values to be equal; otherwise the test fails.
//
// Deprecated: use gomega.Expect().To(gomega.Equal())
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}
// ExpectNotEqual expects the two specified values to differ; otherwise the test fails.
//
// Deprecated: use gomega.Expect().ToNot(gomega.Equal())
func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
}
// ExpectError expects an error to occur; otherwise the test fails.
//
// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
// specifically for the error that is expected with
// gomega.Expect().To(gomega.MatchError(gomega.ContainSubstring()))
func ExpectError(err error, explain ...interface{}) {
gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
}
@ -72,21 +349,37 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
// failures at the same code line might not be matched in
// https://go.k8s.io/triage because the error details are too
// different.
Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
//
// Some errors include all relevant information in the Error
// string. For those we can skip the redundant log message.
// For our own failures we only log the additional stack backtrace
// because it is not included in the failure message.
var failure FailureError
if errors.As(err, &failure) && failure.Backtrace() != "" {
Logf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n "))
} else if !errors.Is(err, ErrFailure) {
Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
}
Fail(prefix+err.Error(), 1+offset)
}
// ExpectConsistOf expects actual to contain precisely the elements in extra; the ordering of the elements does not matter.
//
// Deprecated: use gomega.Expect().To(gomega.ConsistOf()) instead
func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...)
}
// ExpectHaveKey expects the actual map to contain the given key.
//
// Deprecated: use gomega.Expect().To(gomega.HaveKey()) instead
func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...)
}
// ExpectEmpty expects actual to be empty.
//
// Deprecated: use gomega.Expect().To(gomega.BeEmpty()) instead
func ExpectEmpty(actual interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...)
}

View File

@ -27,6 +27,7 @@ import (
"math/rand"
"os"
"path"
"reflect"
"strings"
"time"
@ -38,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
v1svc "k8s.io/client-go/applyconfigurations/core/v1"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
@ -54,6 +56,7 @@ import (
const (
// DefaultNamespaceDeletionTimeout is timeout duration for waiting for a namespace deletion.
DefaultNamespaceDeletionTimeout = 5 * time.Minute
defaultServiceAccountName = "default"
)
var (
@ -103,6 +106,7 @@ type Framework struct {
ScalesGetter scaleclient.ScalesGetter
SkipNamespaceCreation bool // Whether to skip creating a namespace
SkipSecretCreation bool // Whether to skip creating secret for a test
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
@ -128,7 +132,7 @@ type Framework struct {
// DumpAllNamespaceInfoAction is called after each failed test for namespaces
// created for the test.
type DumpAllNamespaceInfoAction func(f *Framework, namespace string)
type DumpAllNamespaceInfoAction func(ctx context.Context, f *Framework, namespace string)
// TestDataSummary is an interface for managing test data.
type TestDataSummary interface {
@ -144,10 +148,19 @@ type Options struct {
GroupVersion *schema.GroupVersion
}
// NewFrameworkWithCustomTimeouts makes a framework with with custom timeouts.
// NewFrameworkWithCustomTimeouts makes a framework with custom timeouts.
// For timeout values that are zero the normal default value continues to
// be used.
func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
f := NewDefaultFramework(baseName)
f.Timeouts = timeouts
in := reflect.ValueOf(timeouts).Elem()
out := reflect.ValueOf(f.Timeouts).Elem()
for i := 0; i < in.NumField(); i++ {
value := in.Field(i)
if !value.IsZero() {
out.Field(i).Set(value)
}
}
return f
}
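
A brief usage sketch of the merging behavior above: only non-zero fields of the passed TimeoutContext override the defaults. The suite name and the timeout value are illustrative assumptions.

package e2esketch // hypothetical package for this sketch

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

func newSlowStorageFramework() *framework.Framework {
	return framework.NewFrameworkWithCustomTimeouts("slow-storage", &framework.TimeoutContext{
		PodStart: 15 * time.Minute, // overrides the default; zero fields keep theirs
	})
}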
@ -169,7 +182,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
BaseName: baseName,
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
Timeouts: NewTimeoutContext(),
}
// The order is important here: if the extension calls ginkgo.BeforeEach
@ -184,7 +197,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
}
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
func (f *Framework) BeforeEach(ctx context.Context) {
// DeferCleanup, in contrast to AfterEach, triggers execution in
// first-in-last-out order. This ensures that the framework instance
// remains valid as long as possible.
@ -235,7 +248,7 @@ func (f *Framework) BeforeEach() {
if !f.SkipNamespaceCreation {
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
namespace, err := f.CreateNamespace(ctx, f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
ExpectNoError(err)
@ -244,14 +257,15 @@ func (f *Framework) BeforeEach() {
if TestContext.VerifyServiceAccount {
ginkgo.By("Waiting for a default service account to be provisioned in namespace")
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
err = WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
ExpectNoError(err)
ginkgo.By("Waiting for kube-root-ca.crt to be provisioned in namespace")
err = WaitForKubeRootCAInNamespace(f.ClientSet, namespace.Name)
err = WaitForKubeRootCAInNamespace(ctx, f.ClientSet, namespace.Name)
ExpectNoError(err)
} else {
Logf("Skipping waiting for service account")
}
f.UniqueName = f.Namespace.GetName()
} else {
// not guaranteed to be unique, but very likely
@ -261,17 +275,20 @@ func (f *Framework) BeforeEach() {
f.flakeReport = NewFlakeReport()
}
func (f *Framework) dumpNamespaceInfo() {
func (f *Framework) dumpNamespaceInfo(ctx context.Context) {
if !ginkgo.CurrentSpecReport().Failed() {
return
}
if !TestContext.DumpLogsOnFailure {
return
}
if f.DumpAllNamespaceInfo == nil {
return
}
ginkgo.By("dump namespace information after failure", func() {
if !f.SkipNamespaceCreation {
for _, ns := range f.namespacesToDelete {
f.DumpAllNamespaceInfo(f, ns.Name)
f.DumpAllNamespaceInfo(ctx, f, ns.Name)
}
}
})
@ -315,7 +332,7 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
}
// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
func (f *Framework) AfterEach(ctx context.Context) {
// This should not happen. Given ClientSet is a public field a test must have updated it!
// Error out early before any API calls during cleanup.
if f.ClientSet == nil {
@ -332,13 +349,13 @@ func (f *Framework) AfterEach() {
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentSpecReport().Failed()) {
for _, ns := range f.namespacesToDelete {
ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}); err != nil {
if err := f.ClientSet.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
f.DumpAllNamespaceInfo(f, ns.Name)
f.DumpAllNamespaceInfo(ctx, f, ns.Name)
}
} else {
Logf("Namespace %v was already deleted", ns.Name)
@ -385,14 +402,14 @@ func (f *Framework) AfterEach() {
// DeleteNamespace can be used to delete a namespace. Additionally it dumps namespace
// information, so it can be used as an alternative to letting the framework
// delete the namespace at the end.
func (f *Framework) DeleteNamespace(name string) {
func (f *Framework) DeleteNamespace(ctx context.Context, name string) {
defer func() {
err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
err := f.ClientSet.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
Logf("error deleting namespace %s: %v", name, err)
return
}
err = WaitForNamespacesDeleted(f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
err = WaitForNamespacesDeleted(ctx, f.ClientSet, []string{name}, DefaultNamespaceDeletionTimeout)
if err != nil {
Logf("error deleting namespace %s: %v", name, err)
return
@ -409,13 +426,13 @@ func (f *Framework) DeleteNamespace(name string) {
}()
// if current test failed then we should dump namespace information
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
f.DumpAllNamespaceInfo(f, name)
f.DumpAllNamespaceInfo(ctx, f, name)
}
}
// CreateNamespace creates a namespace for e2e testing.
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
func (f *Framework) CreateNamespace(ctx context.Context, baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS
if createTestingNS == nil {
createTestingNS = CreateTestingNS
@ -437,14 +454,53 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
}
labels[admissionapi.EnforceLevelLabel] = string(enforceLevel)
ns, err := createTestingNS(baseName, f.ClientSet, labels)
ns, err := createTestingNS(ctx, baseName, f.ClientSet, labels)
// check ns instead of err to see if it's nil as we may
// fail to create serviceAccount in it.
f.AddNamespacesToDelete(ns)
if TestContext.E2EDockerConfigFile != "" && !f.SkipSecretCreation {
// With the Secret created, the default service account (in the new namespace)
// is patched with the secret so that all pods spawned by the E2E process can reference it and authenticate against the repository.
secret, err := f.createSecretFromDockerConfig(ctx, ns.Name)
if err != nil {
return ns, fmt.Errorf("failed to create secret from docker config file: %v", err)
}
serviceAccountClient := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
serviceAccountConfig := v1svc.ServiceAccount(defaultServiceAccountName, ns.Name)
serviceAccountConfig.ImagePullSecrets = append(serviceAccountConfig.ImagePullSecrets, v1svc.LocalObjectReferenceApplyConfiguration{Name: &secret.Name})
svc, err := serviceAccountClient.Apply(ctx, serviceAccountConfig, metav1.ApplyOptions{FieldManager: "e2e-framework"})
if err != nil {
return ns, fmt.Errorf("failed to patch imagePullSecret [%s] to service account [%s]: %v", secret.Name, svc.Name, err)
}
}
return ns, err
}
// createSecretFromDockerConfig creates a secret using the private image registry credentials.
// The credentials are provided by --e2e-docker-config-file flag.
func (f *Framework) createSecretFromDockerConfig(ctx context.Context, namespace string) (*v1.Secret, error) {
contents, err := os.ReadFile(TestContext.E2EDockerConfigFile)
if err != nil {
return nil, fmt.Errorf("error reading docker config file: %v", err)
}
secretObject := &v1.Secret{
Data: map[string][]byte{v1.DockerConfigJsonKey: contents},
Type: v1.SecretTypeDockerConfigJson,
}
secretObject.GenerateName = "registry-cred"
Logf("create image pull secret %s", secretObject.Name)
secret, err := f.ClientSet.CoreV1().Secrets(namespace).Create(ctx, secretObject, metav1.CreateOptions{})
return secret, err
}
// RecordFlakeIfError records flakiness info if an error happens.
// NOTE: This function is not used at any places yet, but we are in progress for https://github.com/kubernetes/kubernetes/issues/66239 which requires this. Please don't remove this.
func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
@ -526,11 +582,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
return nil
}
// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, body interface{}) bool {
return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body)
}
// PodStateVerification represents a verification of pod state.
// Any time you have a set of pods that you want to operate against or query,
// this struct can be used to declaratively identify those pods.
@ -596,7 +647,7 @@ func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
}
// filterLabels returns the pods in the namespace that match the given label selectors (all pods if no selectors are given).
func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
func filterLabels(ctx context.Context, selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
var err error
var selector labels.Selector
var pl *v1.PodList
@ -605,9 +656,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
if len(selectors) > 0 {
selector = labels.SelectorFromSet(labels.Set(selectors))
options := metav1.ListOptions{LabelSelector: selector.String()}
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options)
pl, err = cli.CoreV1().Pods(ns).List(ctx, options)
} else {
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
pl, err = cli.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
}
return pl, err
}
@ -615,13 +666,13 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
// filter filters pods which pass a filter. It can be used to compose
// the more useful abstractions like ForEach, WaitFor, and so on, which
// can be used directly by tests.
func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
func (p *PodStateVerification) filter(ctx context.Context, c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
if len(p.ValidPhases) == 0 || namespace == nil {
panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
}
ns := namespace.Name
pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
pl, err := filterLabels(ctx, p.Selectors, c, ns) // Build an v1.PodList to operate against.
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil {
return pl.Items, err
@ -649,12 +700,12 @@ ReturnPodsSoFar:
// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
// definition.
func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
func (cl *ClusterVerification) WaitFor(ctx context.Context, atLeast int, timeout time.Duration) ([]v1.Pod, error) {
pods := []v1.Pod{}
var returnedErr error
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
pods, returnedErr = cl.podState.filter(cl.client, cl.namespace)
err := wait.PollWithContext(ctx, 1*time.Second, timeout, func(ctx context.Context) (bool, error) {
pods, returnedErr = cl.podState.filter(ctx, cl.client, cl.namespace)
// Failure
if returnedErr != nil {
@ -677,8 +728,8 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
}
// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
pods, err := cl.WaitFor(atLeast, timeout)
func (cl *ClusterVerification) WaitForOrFail(ctx context.Context, atLeast int, timeout time.Duration) {
pods, err := cl.WaitFor(ctx, atLeast, timeout)
if err != nil || len(pods) < atLeast {
Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
}
@ -689,8 +740,8 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration)
//
// For example, if you require at least 5 pods to be running before your test will pass,
// its smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace)
func (cl *ClusterVerification) ForEach(ctx context.Context, podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(ctx, cl.client, cl.namespace)
if err == nil {
if len(pods) == 0 {
Failf("No pods matched the filter.")

145
vendor/k8s.io/kubernetes/test/e2e/framework/get.go generated vendored Normal file
View File

@ -0,0 +1,145 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"errors"
"fmt"
"time"
"github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// GetFunc is a function which retrieves a certain object.
type GetFunc[T any] func(ctx context.Context) (T, error)
// APIGetFunc is a get function as used in client-go.
type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error)
// APIListFunc is a list function as used in client-go.
type APIListFunc[T any] func(ctx context.Context, listOptions metav1.ListOptions) (T, error)
// GetObject takes a get function like clientset.CoreV1().Pods(ns).Get
// and the parameters for it and returns a function that executes that get
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry]. A "not found" error is
// a fatal error that causes polling to stop immediately. If that is not
// desired, then wrap the result with [RetryNotFound].
func GetObject[T any](get APIGetFunc[T], name string, getOptions metav1.GetOptions) GetFunc[T] {
return HandleRetry(func(ctx context.Context) (T, error) {
return get(ctx, name, getOptions)
})
}
// ListObjects takes a list function like clientset.CoreV1().Pods(ns).List
// and the parameters for it and returns a function that executes that list
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry].
func ListObjects[T any](list APIListFunc[T], listOptions metav1.ListOptions) GetFunc[T] {
return HandleRetry(func(ctx context.Context) (T, error) {
return list(ctx, listOptions)
})
}
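
A hedged sketch of the intended use inside gomega.Eventually, per the comments above; the namespace, pod name, timeout, and matcher are assumptions for illustration.

package e2esketch // hypothetical package for this sketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func waitForPodRunning(ctx context.Context, c clientset.Interface, ns, name string) {
	// GetObject wraps the client-go getter with HandleRetry, so transient API
	// errors are retried while "not found" stops the polling immediately.
	get := framework.GetObject(c.CoreV1().Pods(ns).Get, name, metav1.GetOptions{})
	gomega.Eventually(ctx, get).
		WithTimeout(5 * time.Minute).
		WithPolling(2 * time.Second).
		Should(gomega.HaveField("Status.Phase", v1.PodRunning))
}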
// HandleRetry wraps an arbitrary get function. When the wrapped function
// returns an error, ShouldRetry decides whether the call should be
// retried and, if requested, HandleRetry sleeps before doing so.
//
// This is meant to be used inside [gomega.Eventually] or [gomega.Consistently].
func HandleRetry[T any](get GetFunc[T]) GetFunc[T] {
return func(ctx context.Context) (T, error) {
t, err := get(ctx)
if err != nil {
if retry, delay := ShouldRetry(err); retry {
if delay > 0 {
// We could return
// gomega.TryAgainAfter(delay) here,
// but then we need to funnel that
// error through any other
// wrappers. Waiting directly is simpler.
ctx, cancel := context.WithTimeout(ctx, delay)
defer cancel()
<-ctx.Done()
}
return t, err
}
// Give up polling immediately.
var null T
return t, gomega.StopTrying(fmt.Sprintf("Unexpected final error while getting %T", null)).Wrap(err)
}
return t, nil
}
}
// ShouldRetry decides whether to retry an API request. Optionally returns a
// delay to retry after.
func ShouldRetry(err error) (retry bool, retryAfter time.Duration) {
// if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
return shouldRetry, time.Duration(delay) * time.Second
}
// these errors indicate a transient error that should be retried.
if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) || errors.As(err, &transientError{}) {
return true, 0
}
return false, 0
}
// RetryNotFound wraps an arbitrary get function. When the wrapped function
// encounters a "not found" error, that error is treated as a transient problem
// and polling continues.
//
// This is meant to be used inside [gomega.Eventually] or [gomega.Consistently].
func RetryNotFound[T any](get GetFunc[T]) GetFunc[T] {
return func(ctx context.Context) (T, error) {
t, err := get(ctx)
if apierrors.IsNotFound(err) {
// If we are wrapping HandleRetry, then the error will
// be gomega.StopTrying. We need to get rid of that,
// otherwise gomega.Eventually will stop.
var stopTryingErr gomega.PollingSignalError
if errors.As(err, &stopTryingErr) {
if wrappedErr := errors.Unwrap(stopTryingErr); wrappedErr != nil {
err = wrappedErr
}
}
// Mark the error as transient in case that we get
// wrapped by HandleRetry.
err = transientError{error: err}
}
return t, err
}
}
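
A short sketch of combining RetryNotFound with GetObject when the object is expected to appear eventually; names and timeout are assumptions.

package e2esketch // hypothetical package for this sketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func waitForPodToAppear(ctx context.Context, c clientset.Interface, ns, name string) {
	// Without RetryNotFound, the "not found" error from GetObject would stop polling.
	get := framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, name, metav1.GetOptions{}))
	gomega.Eventually(ctx, get).WithTimeout(2 * time.Minute).Should(gomega.Not(gomega.BeNil()))
}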
// transientError wraps some other error and indicates that the
// wrapper error is something that may go away.
type transientError struct {
error
}
func (err transientError) Unwrap() error {
return err.error
}

View File

@ -18,16 +18,55 @@ package framework
import (
"path"
"reflect"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
// IgnoreNotFound can be used to wrap an arbitrary function in a call to
// [ginkgo.DeferCleanup]. When the wrapped function returns an error that
// `apierrors.IsNotFound` considers as "not found", the error is ignored
// instead of failing the test during cleanup. This is useful for cleanup code
// that just needs to ensure that some object does not exist anymore.
func IgnoreNotFound(in any) any {
inType := reflect.TypeOf(in)
inValue := reflect.ValueOf(in)
return reflect.MakeFunc(inType, func(args []reflect.Value) []reflect.Value {
out := inValue.Call(args)
if len(out) > 0 {
lastValue := out[len(out)-1]
last := lastValue.Interface()
if last != nil && lastValue.Type().Implements(errInterface) && apierrors.IsNotFound(last.(error)) {
out[len(out)-1] = reflect.Zero(errInterface)
}
}
return out
}).Interface()
}
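
A hedged usage sketch: registering a delete call with ginkgo.DeferCleanup and wrapping it with IgnoreNotFound so that an object already removed by the test does not fail cleanup. The namespace client and name are illustrative.

package e2esketch // hypothetical package for this sketch

import (
	"github.com/onsi/ginkgo/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func scheduleNamespaceCleanup(c clientset.Interface, name string) {
	// DeferCleanup supplies the context argument; a NotFound error during
	// cleanup is swallowed by the IgnoreNotFound wrapper.
	ginkgo.DeferCleanup(framework.IgnoreNotFound(c.CoreV1().Namespaces().Delete), name, metav1.DeleteOptions{})
}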
// AnnotatedLocation can be used to provide more informative source code
// locations by passing the result as additional parameter to a
// BeforeEach/AfterEach/DeferCleanup/It/etc.
func AnnotatedLocation(annotation string) types.CodeLocation {
codeLocation := types.NewCodeLocation(1)
return AnnotatedLocationWithOffset(annotation, 1)
}
// AnnotatedLocationWithOffset skips additional call stack levels. With 0 as offset
// it is identical to [AnnotatedLocation].
func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocation {
codeLocation := types.NewCodeLocation(offset + 1)
codeLocation.FileName = path.Base(codeLocation.FileName)
codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
return codeLocation
}
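
A small sketch of passing the annotated location as a Ginkgo decorator so failure reports show a descriptive source location; the setup body is a placeholder.

package e2esketch // hypothetical package for this sketch

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.BeforeEach(func(ctx context.Context) {
	// hypothetical per-test setup
}, framework.AnnotatedLocation("set up test environment"))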
// ConformanceIt is a wrapper function for ginkgo It. It adds the "[Conformance]" tag and makes static analysis easier.
func ConformanceIt(text string, args ...interface{}) bool {
args = append(args, ginkgo.Offset(1))
return ginkgo.It(text+" [Conformance]", args...)
}

View File

@ -0,0 +1,42 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package junit
import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
)
// WriteJUnitReport generates a JUnit file that is shorter than the one
// normally written by `ginkgo --junit-report`. This is needed because the full
// report can become too large for tools like Spyglass
// (https://github.com/kubernetes/kubernetes/issues/111510).
func WriteJUnitReport(report ginkgo.Report, filename string) error {
config := reporters.JunitReportConfig{
// Remove details for specs where we don't care.
OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped,
// Don't write <failure message="summary">. The same text is
// also in the full text for the failure. If we were to write
// both, then tools like kettle and spyglass would concatenate
// the two strings and thus show duplicated information.
OmitFailureMessageAttr: true,
}
return reporters.GenerateJUnitReportWithConfig(report, filename, config)
}
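
A hedged sketch of wiring this into a suite via ginkgo.ReportAfterSuite; the import path of this package and the output file name are assumptions, and the sketch would have to live inside the e2e tree if the package is internal.

package e2esketch // hypothetical package for this sketch

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/internal/junit" // assumed import path
)

var _ = ginkgo.ReportAfterSuite("write condensed JUnit report", func(report ginkgo.Report) {
	if err := junit.WriteJUnitReport(report, "junit_e2e.xml"); err != nil {
		framework.Logf("failed to write JUnit report: %v", err)
	}
})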

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -22,6 +22,7 @@ import (
"io"
"net"
"net/url"
"os"
"os/exec"
"strings"
"syscall"
@ -48,9 +49,12 @@ func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
return b
}
// WithEnv sets the given environment and returns itself.
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
b.cmd.Env = env
// AppendEnv appends the given environment and returns itself.
func (b *KubectlBuilder) AppendEnv(env []string) *KubectlBuilder {
if b.cmd.Env == nil {
b.cmd.Env = os.Environ()
}
b.cmd.Env = append(b.cmd.Env, env...)
return b
}

View File

@ -100,8 +100,8 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd {
}
// LogFailedContainers runs `kubectl logs` on failed containers.
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
func LogFailedContainers(ctx context.Context, c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
@ -109,18 +109,18 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", framework.Logf)
kubectlLogPod(ctx, c, pod, "", framework.Logf)
}
}
}
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
func kubectlLogPod(ctx context.Context, c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
logs, err := e2epod.GetPodLogs(ctx, c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
logs, err = e2epod.GetPreviousPodLogs(ctx, c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}

View File

@ -17,14 +17,10 @@ limitations under the License.
package framework
import (
"bytes"
"fmt"
"regexp"
"runtime/debug"
"time"
"github.com/onsi/ginkgo/v2"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
)
func nowStamp() string {
@ -45,61 +41,9 @@ func Logf(format string, args ...interface{}) {
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := 1
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgo.Fail(nowStamp()+": "+msg, skip)
ginkgo.Fail(msg, skip)
panic("unreachable")
}
// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs
// together with a stack trace and then calls ginkgowrapper.Fail.
func Fail(msg string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
ginkgo.Fail(nowStamp()+": "+msg, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
// PrunedStack is a wrapper around debug.Stack() that removes information
// about the current goroutine and optionally skips some of the initial stack entries.
// With skip == 0, the returned stack will start with the caller of PruneStack.
// From the remaining entries it automatically filters out useless ones like
// entries coming from Ginkgo.
//
// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/v2/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25:
// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter)
// - source code filtering updated to be specific to Kubernetes
// - optimized to use bytes and in-place slice filtering from
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
func PrunedStack(skip int) []byte {
fullStackTrace := debug.Stack()
stack := bytes.Split(fullStackTrace, []byte("\n"))
// Ensure that the even entries are the method names and
// the odd entries the source code information.
if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) {
// Ignore "goroutine 29 [running]:" line.
stack = stack[1:]
}
// The "+2" is for skipping over:
// - runtime/debug.Stack()
// - PrunedStack()
skip += 2
if len(stack) > 2*skip {
stack = stack[2*skip:]
}
n := 0
for i := 0; i < len(stack)/2; i++ {
// We filter out based on the source code file name.
if !codeFilterRE.Match([]byte(stack[i*2+1])) {
stack[n] = stack[i*2]
stack[n+1] = stack[i*2+1]
n += 2
}
}
stack = stack[:n]
return bytes.Join(stack, []byte("\n"))
}
// Fail is an alias for ginkgo.Fail.
var Fail = ginkgo.Fail

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -0,0 +1,14 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-instrumentation-approvers
emeritus_approvers:
- fabxc
- piosz
- fgrzadkowski
- kawych
- x13n
reviewers:
- sig-instrumentation-reviewers
labels:
- sig/instrumentation

View File

@ -17,8 +17,6 @@ limitations under the License.
package metrics
import (
"context"
"k8s.io/component-base/metrics/testutil"
)
@ -42,11 +40,3 @@ func parseAPIServerMetrics(data string) (APIServerMetrics, error) {
}
return result, nil
}
func (g *Grabber) getMetricsFromAPIServer() (string, error) {
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(context.TODO()).Raw()
if err != nil {
return "", err
}
return string(rawOutput), nil
}

View File

@ -17,24 +17,26 @@ limitations under the License.
package metrics
import (
"context"
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
)
func GrabBeforeEach(f *framework.Framework) (result *Collection) {
func GrabBeforeEach(ctx context.Context, f *framework.Framework) (result *Collection) {
gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master"
if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics {
return nil
}
ginkgo.By("Gathering metrics before test", func() {
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
grabber, err := NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
return
}
metrics, err := grabber.Grab()
metrics, err := grabber.Grab(ctx)
if err != nil {
framework.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
return
@ -46,7 +48,7 @@ func GrabBeforeEach(f *framework.Framework) (result *Collection) {
return
}
func GrabAfterEach(f *framework.Framework, before *Collection) {
func GrabAfterEach(ctx context.Context, f *framework.Framework, before *Collection) {
if framework.TestContext.GatherMetricsAfterTest == "false" {
return
}
@ -54,12 +56,12 @@ func GrabAfterEach(f *framework.Framework, before *Collection) {
ginkgo.By("Gathering metrics after test", func() {
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark")
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
grabber, err := NewMetricsGrabber(ctx, f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
return
}
received, err := grabber.Grab()
received, err := grabber.Grab(ctx)
if err != nil {
framework.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
return

View File

@ -68,9 +68,12 @@ func NewKubeletMetrics() KubeletMetrics {
}
// GrabKubeletMetricsWithoutProxy retrieves metrics from the kubelet on the given node using a simple GET over HTTP.
// Currently only used in integration tests.
func GrabKubeletMetricsWithoutProxy(nodeName, path string) (KubeletMetrics, error) {
resp, err := http.Get(fmt.Sprintf("http://%s%s", nodeName, path))
func GrabKubeletMetricsWithoutProxy(ctx context.Context, nodeName, path string) (KubeletMetrics, error) {
req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("http://%s%s", nodeName, path), nil)
if err != nil {
return KubeletMetrics{}, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return KubeletMetrics{}, err
}
@ -90,31 +93,6 @@ func parseKubeletMetrics(data string) (KubeletMetrics, error) {
return result, nil
}
func (g *Grabber) getMetricsFromNode(nodeName string, kubeletPort int) (string, error) {
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
finished := make(chan struct{}, 1)
var err error
var rawOutput []byte
go func() {
rawOutput, err = g.client.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Do(context.TODO()).Raw()
finished <- struct{}{}
}()
select {
case <-time.After(proxyTimeout):
return "", fmt.Errorf("Timed out when waiting for proxy to gather metrics from %v", nodeName)
case <-finished:
if err != nil {
return "", err
}
return string(rawOutput), nil
}
}
// KubeletLatencyMetric stores metrics scraped from the kubelet server's /metric endpoint.
// TODO: Get some more structure around the metrics and this type
type KubeletLatencyMetric struct {
@ -137,21 +115,21 @@ func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].
// If an apiserver client is passed in, the function tries to get kubelet metrics through the metrics grabber;
// otherwise, it tries to get kubelet metrics directly from the node.
func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (KubeletMetrics, error) {
func getKubeletMetricsFromNode(ctx context.Context, c clientset.Interface, nodeName string) (KubeletMetrics, error) {
if c == nil {
return GrabKubeletMetricsWithoutProxy(nodeName, "/metrics")
return GrabKubeletMetricsWithoutProxy(ctx, nodeName, "/metrics")
}
grabber, err := NewMetricsGrabber(c, nil, nil, true, false, false, false, false, false)
grabber, err := NewMetricsGrabber(ctx, c, nil, nil, true, false, false, false, false, false)
if err != nil {
return KubeletMetrics{}, err
}
return grabber.GrabFromKubelet(nodeName)
return grabber.GrabFromKubelet(ctx, nodeName)
}
// GetKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims
// the subsystem prefix.
func GetKubeletMetrics(c clientset.Interface, nodeName string) (KubeletMetrics, error) {
ms, err := getKubeletMetricsFromNode(c, nodeName)
func GetKubeletMetrics(ctx context.Context, c clientset.Interface, nodeName string) (KubeletMetrics, error) {
ms, err := getKubeletMetricsFromNode(ctx, c, nodeName)
if err != nil {
return KubeletMetrics{}, err
}
@ -217,8 +195,8 @@ func GetKubeletLatencyMetrics(ms KubeletMetrics, filterMetricNames sets.String)
}
// HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
ms, err := GetKubeletMetrics(c, nodeName)
func HighLatencyKubeletOperations(ctx context.Context, c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
ms, err := GetKubeletMetrics(ctx, c, nodeName)
if err != nil {
return KubeletLatencyMetrics{}, err
}

View File

@ -54,6 +54,7 @@ var MetricsGrabbingDisabledError = errors.New("metrics grabbing disabled")
// Collection is metrics collection of components
type Collection struct {
APIServerMetrics APIServerMetrics
APIServerMetricsSLIs APIServerMetrics
ControllerManagerMetrics ControllerManagerMetrics
SnapshotControllerMetrics SnapshotControllerMetrics
KubeletMetrics map[string]KubeletMetrics
@ -88,7 +89,7 @@ type Grabber struct {
// Collecting metrics data is an optional debug feature. Not all clusters will
// support it. If disabled for a component, the corresponding Grab function
// will immediately return an error derived from MetricsGrabbingDisabledError.
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) {
func NewMetricsGrabber(ctx context.Context, c clientset.Interface, ec clientset.Interface, config *rest.Config, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool, snapshotController bool) (*Grabber, error) {
kubeScheduler := ""
kubeControllerManager := ""
@ -102,7 +103,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *re
return nil, errors.New("a rest config is required for grabbing kube-controller and kube-controller-manager metrics")
}
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{})
podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@ -132,18 +133,18 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, config *re
externalClient: ec,
config: config,
grabFromAPIServer: apiServer,
grabFromControllerManager: checkPodDebugHandlers(c, controllers, "kube-controller-manager", kubeControllerManager),
grabFromControllerManager: checkPodDebugHandlers(ctx, c, controllers, "kube-controller-manager", kubeControllerManager),
grabFromKubelets: kubelets,
grabFromScheduler: checkPodDebugHandlers(c, scheduler, "kube-scheduler", kubeScheduler),
grabFromScheduler: checkPodDebugHandlers(ctx, c, scheduler, "kube-scheduler", kubeScheduler),
grabFromClusterAutoscaler: clusterAutoscaler,
grabFromSnapshotController: checkPodDebugHandlers(c, snapshotController, "snapshot-controller", snapshotControllerManager),
grabFromSnapshotController: checkPodDebugHandlers(ctx, c, snapshotController, "snapshot-controller", snapshotControllerManager),
kubeScheduler: kubeScheduler,
kubeControllerManager: kubeControllerManager,
snapshotController: snapshotControllerManager,
}, nil
}
func checkPodDebugHandlers(c clientset.Interface, requested bool, component, podName string) bool {
func checkPodDebugHandlers(ctx context.Context, c clientset.Interface, requested bool, component, podName string) bool {
if !requested {
return false
}
@ -155,7 +156,7 @@ func checkPodDebugHandlers(c clientset.Interface, requested bool, component, pod
// The debug handlers on the host where the pod runs might be disabled.
// We can check that indirectly by trying to retrieve log output.
limit := int64(1)
if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(context.TODO()); err != nil {
if _, err := c.CoreV1().Pods(metav1.NamespaceSystem).GetLogs(podName, &v1.PodLogOptions{LimitBytes: &limit}).DoRaw(ctx); err != nil {
klog.Warningf("Can't retrieve log output of %s (%q). Debug handlers might be disabled in kubelet. Grabbing metrics from %s is disabled.",
podName, err, component)
return false
@ -171,8 +172,8 @@ func (g *Grabber) HasControlPlanePods() bool {
}
// GrabFromKubelet returns metrics from kubelet
func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
func (g *Grabber) GrabFromKubelet(ctx context.Context, nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": nodeName}.AsSelector().String()})
if err != nil {
return KubeletMetrics{}, err
}
@ -180,22 +181,47 @@ func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
}
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
return g.grabFromKubeletInternal(nodeName, int(kubeletPort))
return g.grabFromKubeletInternal(ctx, nodeName, int(kubeletPort))
}
func (g *Grabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) {
func (g *Grabber) grabFromKubeletInternal(ctx context.Context, nodeName string, kubeletPort int) (KubeletMetrics, error) {
if kubeletPort <= 0 || kubeletPort > 65535 {
return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering", kubeletPort)
}
output, err := g.getMetricsFromNode(nodeName, int(kubeletPort))
output, err := g.getMetricsFromNode(ctx, nodeName, int(kubeletPort))
if err != nil {
return KubeletMetrics{}, err
}
return parseKubeletMetrics(output)
}
func (g *Grabber) getMetricsFromNode(ctx context.Context, nodeName string, kubeletPort int) (string, error) {
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
finished := make(chan struct{}, 1)
var err error
var rawOutput []byte
go func() {
rawOutput, err = g.client.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
Suffix("metrics").
Do(ctx).Raw()
finished <- struct{}{}
}()
select {
case <-time.After(proxyTimeout):
return "", fmt.Errorf("Timed out when waiting for proxy to gather metrics from %v", nodeName)
case <-finished:
if err != nil {
return "", err
}
return string(rawOutput), nil
}
}
// GrabFromScheduler returns metrics from scheduler
func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
func (g *Grabber) GrabFromScheduler(ctx context.Context) (SchedulerMetrics, error) {
if !g.grabFromScheduler {
return SchedulerMetrics{}, fmt.Errorf("kube-scheduler: %w", MetricsGrabbingDisabledError)
}
@ -203,7 +229,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
var err error
g.waitForSchedulerReadyOnce.Do(func() {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, g.kubeScheduler, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
err = fmt.Errorf("error waiting for kube-scheduler pod to be ready: %w", readyErr)
}
})
@ -213,8 +239,8 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
var lastMetricsFetchErr error
var output string
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeScheduler, metav1.NamespaceSystem, kubeSchedulerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err := fmt.Errorf("error waiting for kube-scheduler pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -225,7 +251,7 @@ func (g *Grabber) GrabFromScheduler() (SchedulerMetrics, error) {
}
// GrabFromClusterAutoscaler returns metrics from cluster autoscaler
func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error) {
func (g *Grabber) GrabFromClusterAutoscaler(ctx context.Context) (ClusterAutoscalerMetrics, error) {
if !g.HasControlPlanePods() && g.externalClient == nil {
return ClusterAutoscalerMetrics{}, fmt.Errorf("ClusterAutoscaler: %w", MetricsGrabbingDisabledError)
}
@ -238,7 +264,7 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)
client = g.client
namespace = metav1.NamespaceSystem
}
output, err := g.getMetricsFromPod(client, "cluster-autoscaler", namespace, 8085)
output, err := g.getMetricsFromPod(ctx, client, "cluster-autoscaler", namespace, 8085)
if err != nil {
return ClusterAutoscalerMetrics{}, err
}
@ -246,7 +272,7 @@ func (g *Grabber) GrabFromClusterAutoscaler() (ClusterAutoscalerMetrics, error)
}
// GrabFromControllerManager returns metrics from controller manager
func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error) {
func (g *Grabber) GrabFromControllerManager(ctx context.Context) (ControllerManagerMetrics, error) {
if !g.grabFromControllerManager {
return ControllerManagerMetrics{}, fmt.Errorf("kube-controller-manager: %w", MetricsGrabbingDisabledError)
}
@ -254,7 +280,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
var err error
g.waitForControllerManagerReadyOnce.Do(func() {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, g.kubeControllerManager, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
err = fmt.Errorf("error waiting for kube-controller-manager pod to be ready: %w", readyErr)
}
})
@ -264,8 +290,8 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getSecureMetricsFromPod(ctx, g.kubeControllerManager, metav1.NamespaceSystem, kubeControllerManagerPort)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err := fmt.Errorf("error waiting for kube-controller-manager to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -276,7 +302,7 @@ func (g *Grabber) GrabFromControllerManager() (ControllerManagerMetrics, error)
}
// GrabFromSnapshotController returns metrics from the volume snapshot controller
func (g *Grabber) GrabFromSnapshotController(podName string, port int) (SnapshotControllerMetrics, error) {
func (g *Grabber) GrabFromSnapshotController(ctx context.Context, podName string, port int) (SnapshotControllerMetrics, error) {
if !g.grabFromSnapshotController {
return SnapshotControllerMetrics{}, fmt.Errorf("volume-snapshot-controller: %w", MetricsGrabbingDisabledError)
}
@ -293,7 +319,7 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
var err error
g.waitForSnapshotControllerReadyOnce.Do(func() {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
if readyErr := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, g.client, podName, metav1.NamespaceSystem, 5*time.Minute); readyErr != nil {
err = fmt.Errorf("error waiting for volume-snapshot-controller pod to be ready: %w", readyErr)
}
})
@ -303,8 +329,8 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
var output string
var lastMetricsFetchErr error
if metricsWaitErr := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(g.client, podName, metav1.NamespaceSystem, port)
if metricsWaitErr := wait.PollImmediateWithContext(ctx, time.Second, time.Minute, func(ctx context.Context) (bool, error) {
output, lastMetricsFetchErr = g.getMetricsFromPod(ctx, g.client, podName, metav1.NamespaceSystem, port)
return lastMetricsFetchErr == nil, nil
}); metricsWaitErr != nil {
err = fmt.Errorf("error waiting for volume-snapshot-controller pod to expose metrics: %v; %v", metricsWaitErr, lastMetricsFetchErr)
@ -315,28 +341,59 @@ func (g *Grabber) GrabFromSnapshotController(podName string, port int) (Snapshot
}
// GrabFromAPIServer returns metrics from API server
func (g *Grabber) GrabFromAPIServer() (APIServerMetrics, error) {
output, err := g.getMetricsFromAPIServer()
func (g *Grabber) GrabFromAPIServer(ctx context.Context) (APIServerMetrics, error) {
output, err := g.getMetricsFromAPIServer(ctx)
if err != nil {
return APIServerMetrics{}, err
}
return parseAPIServerMetrics(output)
}
// GrabMetricsSLIsFromAPIServer returns metrics from API server
func (g *Grabber) GrabMetricsSLIsFromAPIServer(ctx context.Context) (APIServerMetrics, error) {
output, err := g.getMetricsSLIsFromAPIServer(ctx)
if err != nil {
return APIServerMetrics{}, err
}
return parseAPIServerMetrics(output)
}
func (g *Grabber) getMetricsFromAPIServer(ctx context.Context) (string, error) {
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do(ctx).Raw()
if err != nil {
return "", err
}
return string(rawOutput), nil
}
func (g *Grabber) getMetricsSLIsFromAPIServer(ctx context.Context) (string, error) {
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics/slis").Do(ctx).Raw()
if err != nil {
return "", err
}
return string(rawOutput), nil
}
// Grab returns metrics from corresponding component
func (g *Grabber) Grab() (Collection, error) {
func (g *Grabber) Grab(ctx context.Context) (Collection, error) {
result := Collection{}
var errs []error
if g.grabFromAPIServer {
metrics, err := g.GrabFromAPIServer()
metrics, err := g.GrabFromAPIServer(ctx)
if err != nil {
errs = append(errs, err)
} else {
result.APIServerMetrics = metrics
}
metrics, err = g.GrabMetricsSLIsFromAPIServer(ctx)
if err != nil {
errs = append(errs, err)
} else {
result.APIServerMetricsSLIs = metrics
}
}
if g.grabFromScheduler {
metrics, err := g.GrabFromScheduler()
metrics, err := g.GrabFromScheduler(ctx)
if err != nil {
errs = append(errs, err)
} else {
@ -344,7 +401,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
}
if g.grabFromControllerManager {
metrics, err := g.GrabFromControllerManager()
metrics, err := g.GrabFromControllerManager(ctx)
if err != nil {
errs = append(errs, err)
} else {
@ -352,7 +409,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
}
if g.grabFromSnapshotController {
metrics, err := g.GrabFromSnapshotController(g.snapshotController, snapshotControllerPort)
metrics, err := g.GrabFromSnapshotController(ctx, g.snapshotController, snapshotControllerPort)
if err != nil {
errs = append(errs, err)
} else {
@ -360,7 +417,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
}
if g.grabFromClusterAutoscaler {
metrics, err := g.GrabFromClusterAutoscaler()
metrics, err := g.GrabFromClusterAutoscaler(ctx)
if err != nil {
errs = append(errs, err)
} else {
@ -369,13 +426,13 @@ func (g *Grabber) Grab() (Collection, error) {
}
if g.grabFromKubelets {
result.KubeletMetrics = make(map[string]KubeletMetrics)
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodes, err := g.client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
errs = append(errs, err)
} else {
for _, node := range nodes.Items {
kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
metrics, err := g.grabFromKubeletInternal(node.Name, int(kubeletPort))
metrics, err := g.grabFromKubeletInternal(ctx, node.Name, int(kubeletPort))
if err != nil {
errs = append(errs, err)
}
@ -390,14 +447,14 @@ func (g *Grabber) Grab() (Collection, error) {
}
// getMetricsFromPod retrieves metrics data from an insecure port.
func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string, namespace string, port int) (string, error) {
func (g *Grabber) getMetricsFromPod(ctx context.Context, client clientset.Interface, podName string, namespace string, port int) (string, error) {
rawOutput, err := client.CoreV1().RESTClient().Get().
Namespace(namespace).
Resource("pods").
SubResource("proxy").
Name(fmt.Sprintf("%s:%d", podName, port)).
Suffix("metrics").
Do(context.TODO()).Raw()
Do(ctx).Raw()
if err != nil {
return "", err
}
@ -409,7 +466,7 @@ func (g *Grabber) getMetricsFromPod(client clientset.Interface, podName string,
// similar to "kubectl port-forward" + "kubectl get --raw
// https://localhost:<port>/metrics". It uses the same credentials
// as kubelet.
func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port int) (string, error) {
func (g *Grabber) getSecureMetricsFromPod(ctx context.Context, podName string, namespace string, port int) (string, error) {
dialer := e2epod.NewDialer(g.client, g.config)
metricConfig := rest.CopyConfig(g.config)
addr := e2epod.Addr{
@ -444,7 +501,7 @@ func (g *Grabber) getSecureMetricsFromPod(podName string, namespace string, port
rawOutput, err := metricClient.RESTClient().Get().
AbsPath("metrics").
Do(context.TODO()).Raw()
Do(ctx).Raw()
if err != nil {
return "", err
}
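// Illustrative usage sketch (not part of the vendored diff): how a caller might drive
// the now context-aware Grab API. It assumes a *Grabber was constructed elsewhere by
// the e2e framework; only Grab(ctx) and the Collection fields shown above are relied upon.
package example

import (
	"context"
	"fmt"

	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)

func grabAll(ctx context.Context, grabber *e2emetrics.Grabber) error {
	collection, err := grabber.Grab(ctx) // ctx is threaded into every per-component grab
	if err != nil {
		return fmt.Errorf("grabbing metrics: %w", err)
	}
	fmt.Printf("grabbed kubelet metrics from %d nodes\n", len(collection.KubeletMetrics))
	return nil
}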

View File

@ -0,0 +1,49 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
// NamespacedName comprises a resource name, with a mandatory namespace,
// rendered as "<namespace>/<name>". It implements NamedObject and thus can be
// used as function parameter instead of a full API object.
type NamespacedName struct {
Namespace string
Name string
}
var _ NamedObject = NamespacedName{}
// NamedObject is a subset of metav1.Object which provides read-only access
// to name and namespace of an object.
type NamedObject interface {
GetNamespace() string
GetName() string
}
// GetNamespace implements NamedObject.
func (n NamespacedName) GetNamespace() string {
return n.Namespace
}
// GetName implements NamedObject.
func (n NamespacedName) GetName() string {
return n.Name
}
// String returns the general purpose string representation
func (n NamespacedName) String() string {
return n.Namespace + "/" + n.Name
}
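// Illustrative sketch (not part of the vendored diff): because NamespacedName
// satisfies NamedObject, helpers that only need a name and namespace can accept
// either a full API object or this lightweight value. describe is a hypothetical helper.
package example

import "k8s.io/kubernetes/test/e2e/framework"

func describe(obj framework.NamedObject) string {
	return obj.GetNamespace() + "/" + obj.GetName()
}

var _ = describe(framework.NamespacedName{Namespace: "kube-system", Name: "kube-dns"})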

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -39,16 +39,17 @@ const (
// WaitForAllNodesSchedulable waits up to timeout for all nodes
// (except up to TestContext.AllowedNotReadyNodes of them) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
func WaitForAllNodesSchedulable(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
return wait.PollImmediateWithContext(
ctx,
30*time.Second,
timeout,
CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
CheckReadyForTests(ctx, c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}
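// Illustrative sketch (not part of the vendored diff): a suite-level caller now has to
// thread its context into the schedulability wait. e2enode is assumed to be the
// conventional alias for this package.
package example

import (
	"context"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func waitForCluster(ctx context.Context, c clientset.Interface) error {
	// Give nodes up to ten minutes to become schedulable before running tests.
	return e2enode.WaitForAllNodesSchedulable(ctx, c, 10*time.Minute)
}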
@ -58,9 +59,9 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la
}
// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
func ExpectNodeHasLabel(ctx context.Context, c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Labels[labelKey], labelValue)
}
@ -76,17 +77,17 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string)
}
// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
func ExpectNodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
if has, err := NodeHasTaint(ctx, c, nodeName, taint); !has {
framework.ExpectNoError(err)
framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -104,14 +105,14 @@ func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err)
func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(ctx, c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %w", err)
}
return nil
}
func allNodesReady(c clientset.Interface, timeout time.Duration) error {
func allNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if framework.TestContext.AllowedNotReadyNodes == -1 {
return nil
}
@ -119,10 +120,10 @@ func allNodesReady(c clientset.Interface, timeout time.Duration) error {
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
err := wait.PollImmediateWithContext(ctx, framework.Poll, timeout, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"sync"
"time"
@ -39,31 +40,31 @@ type NodeKiller struct {
// NewNodeKiller creates new NodeKiller.
func NewNodeKiller(config framework.NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
config.NodeKillerStopCh = make(chan struct{})
config.NodeKillerStopCtx, config.NodeKillerStop = context.WithCancel(context.Background())
return &NodeKiller{config, client, provider}
}
// Run starts NodeKiller until stopCh is closed.
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
func (k *NodeKiller) Run(ctx context.Context) {
// wait.JitterUntil starts work immediately, so wait first.
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
wait.JitterUntil(func() {
nodes := k.pickNodes()
k.kill(nodes)
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
wait.JitterUntilWithContext(ctx, func(ctx context.Context) {
nodes := k.pickNodes(ctx)
k.kill(ctx, nodes)
}, k.config.Interval, k.config.JitterFactor, true)
}
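// Illustrative sketch (not part of the vendored diff): with Run now taking a context,
// a caller can stop the chaos loop by cancelling instead of closing a stop channel.
// The NodeKillerConfig values are whatever the caller has already set up.
package example

import (
	"context"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func startNodeKiller(parent context.Context, cfg framework.NodeKillerConfig, c clientset.Interface, provider string) context.CancelFunc {
	ctx, cancel := context.WithCancel(parent)
	killer := e2enode.NewNodeKiller(cfg, c, provider)
	go killer.Run(ctx) // Run returns once ctx is cancelled
	return cancel      // the caller invokes this to stop killing nodes
}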
func (k *NodeKiller) pickNodes() []v1.Node {
nodes, err := GetReadySchedulableNodes(k.client)
func (k *NodeKiller) pickNodes(ctx context.Context) []v1.Node {
nodes, err := GetReadySchedulableNodes(ctx, k.client)
framework.ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes)
nodes, err = GetBoundedReadySchedulableNodes(ctx, k.client, numNodes)
framework.ExpectNoError(err)
return nodes.Items
}
func (k *NodeKiller) kill(nodes []v1.Node) {
func (k *NodeKiller) kill(ctx context.Context, nodes []v1.Node) {
wg := sync.WaitGroup{}
wg.Add(len(nodes))
for _, node := range nodes {
@ -73,7 +74,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
defer wg.Done()
framework.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
err := e2essh.IssueSSHCommand(ctx, "sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
framework.Logf("ERROR while stopping node %q: %v", node.Name, err)
return
@ -82,7 +83,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
time.Sleep(k.config.SimulatedDowntime)
framework.Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
err = e2essh.IssueSSHCommand(ctx, "sudo reboot", k.provider, &node)
if err != nil {
framework.Logf("ERROR while rebooting node %q: %v", node.Name, err)
return

View File

@ -193,8 +193,8 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
}
// TotalRegistered returns number of schedulable Nodes.
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
func TotalRegistered(ctx context.Context, c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
return 0, err
@ -203,8 +203,8 @@ func TotalRegistered(c clientset.Interface) (int, error) {
}
// TotalReady returns number of ready schedulable Nodes.
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
func TotalReady(ctx context.Context, c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
return 0, err
@ -217,36 +217,28 @@ func TotalReady(c clientset.Interface) (int, error) {
return len(nodes.Items), nil
}
// GetExternalIP returns node external IP concatenated with port 22 for ssh
// GetSSHExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetExternalIP(node *v1.Node) (string, error) {
func GetSSHExternalIP(node *v1.Node) (string, error) {
framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
host = net.JoinHostPort(a.Address, sshPort)
break
return net.JoinHostPort(a.Address, sshPort), nil
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
// GetInternalIP returns node internal IP
func GetInternalIP(node *v1.Node) (string, error) {
host := ""
// GetSSHInternalIP returns node internal IP concatenated with port 22 for ssh
func GetSSHInternalIP(node *v1.Node) (string, error) {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP && address.Address != "" {
host = net.JoinHostPort(address.Address, sshPort)
break
return net.JoinHostPort(address.Address, sshPort), nil
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
return "", fmt.Errorf("Couldn't get the internal IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
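// Illustrative sketch (not part of the vendored diff): the renamed helpers return a
// ready-to-dial "host:22" string, so a caller can pick the address family it needs.
package example

import (
	v1 "k8s.io/api/core/v1"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// sshTarget prefers the external address and falls back to the internal one.
func sshTarget(node *v1.Node) (string, error) {
	if hostPort, err := e2enode.GetSSHExternalIP(node); err == nil {
		return hostPort, nil
	}
	return e2enode.GetSSHInternalIP(node)
}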
// FirstAddressByTypeAndFamily returns the first address that matches the given type and family of the list of nodes
@ -301,10 +293,10 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
}
// PickIP picks one public node IP
func PickIP(c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(c)
func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(ctx, c)
if err != nil {
return "", fmt.Errorf("get node public IPs error: %s", err)
return "", fmt.Errorf("get node public IPs error: %w", err)
}
if len(publicIps) == 0 {
return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
@ -314,10 +306,10 @@ func PickIP(c clientset.Interface) (string, error) {
}
// GetPublicIps returns a public IP list of nodes.
func GetPublicIps(c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(c)
func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
return nil, fmt.Errorf("get schedulable and ready nodes error: %w", err)
}
ips := CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
@ -332,10 +324,10 @@ func GetPublicIps(c clientset.Interface) ([]string, error) {
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
// If there are no nodes that are both ready and schedulable, this will return an error.
func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && isNodeUntainted(&node)
@ -349,8 +341,8 @@ func GetReadySchedulableNodes(c clientset.Interface) (nodes *v1.NodeList, err er
// GetBoundedReadySchedulableNodes is like GetReadySchedulableNodes except that it returns
// at most maxNodes nodes. Use this to keep your test case from blowing up when run on a
// large cluster.
func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
nodes, err = GetReadySchedulableNodes(c)
func GetBoundedReadySchedulableNodes(ctx context.Context, c clientset.Interface, maxNodes int) (nodes *v1.NodeList, err error) {
nodes, err = GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, err
}
@ -369,58 +361,22 @@ func GetBoundedReadySchedulableNodes(c clientset.Interface, maxNodes int) (nodes
// GetRandomReadySchedulableNode gets a single randomly-selected node which is available for
// running pods on. If there are no available nodes it will return an error.
func GetRandomReadySchedulableNode(c clientset.Interface) (*v1.Node, error) {
nodes, err := GetReadySchedulableNodes(c)
func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (*v1.Node, error) {
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, err
}
return &nodes.Items[rand.Intn(len(nodes.Items))], nil
}
// GetSubnetPrefix gets the first 2 numbers of an IP in the node subnet. [IPv4]
// It assumes that the subnet mask is /16.
func GetSubnetPrefix(c clientset.Interface) ([]string, error) {
node, err := GetReadySchedulableWorkerNode(c)
if err != nil {
return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %v", err)
}
internalIP, err := GetInternalIP(node)
if err != nil {
return nil, fmt.Errorf("error getting Node internal IP, err: %v", err)
}
splitted := strings.Split(internalIP, ".")
if len(splitted) == 4 {
return splitted[:2], nil
}
return nil, fmt.Errorf("invalid IP address format: %s", internalIP)
}
// GetReadySchedulableWorkerNode gets a single worker node which is available for
// running pods on. If there are no such available nodes it will return an error.
func GetReadySchedulableWorkerNode(c clientset.Interface) (*v1.Node, error) {
nodes, err := GetReadySchedulableNodes(c)
if err != nil {
return nil, err
}
for i := range nodes.Items {
node := nodes.Items[i]
_, isMaster := node.Labels["node-role.kubernetes.io/master"]
_, isControlPlane := node.Labels["node-role.kubernetes.io/control-plane"]
if !isMaster && !isControlPlane {
return &node, nil
}
}
return nil, fmt.Errorf("there are currently no ready, schedulable worker nodes in the cluster")
}
// GetReadyNodesIncludingTainted returns all ready nodes, even those which are tainted.
// There are cases when we care about tainted nodes
// E.g. in tests related to nodes with gpu we care about nodes despite
// presence of nvidia.com/gpu=present:NoSchedule taint
func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(c)
func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node)
@ -437,25 +393,6 @@ func isNodeUntainted(node *v1.Node) bool {
// isNodeUntaintedWithNonblocking tests whether a fake pod can be scheduled on "node"
// but allows for taints in the list of non-blocking taints.
func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
// Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}
for _, t := range strings.Split(nonblockingTaints, ",") {
@ -475,7 +412,8 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
}
n = nodeCopy
}
return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, fakePod.Spec.Tolerations)
return toleratesTaintsWithNoScheduleNoExecuteEffects(n.Spec.Taints, nil)
}
func toleratesTaintsWithNoScheduleNoExecuteEffects(taints []v1.Taint, tolerations []v1.Toleration) bool {
@ -558,10 +496,10 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
}
// PodNodePairs return podNode pairs for all pods in a namespace
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodNode, error) {
var result []PodNode
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return result, err
}
@ -577,10 +515,10 @@ func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
}
// GetClusterZones returns the values of zone label collected from all nodes.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@ -598,11 +536,11 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
}
// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(c)
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@ -620,8 +558,8 @@ func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
}
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
func CreatePodsPerNodeForSimpleApp(ctx context.Context, c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes, err := GetBoundedReadySchedulableNodes(ctx, c, maxCount)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
podLabels := map[string]string{
@ -629,7 +567,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
}
for i, node := range nodes.Items {
framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
_, err := c.CoreV1().Pods(namespace).Create(ctx, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
@ -644,33 +582,33 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
// RemoveTaintsOffNode removes a list of taints from the given node
// It is simply a helper wrapper for RemoveTaintOffNode
func RemoveTaintsOffNode(c clientset.Interface, nodeName string, taints []v1.Taint) {
func RemoveTaintsOffNode(ctx context.Context, c clientset.Interface, nodeName string, taints []v1.Taint) {
for _, taint := range taints {
RemoveTaintOffNode(c, nodeName, taint)
RemoveTaintOffNode(ctx, c, nodeName, taint)
}
}
// RemoveTaintOffNode removes the given taint from the given node.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(c, nodeName, nil, &taint)
func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) {
err := removeNodeTaint(ctx, c, nodeName, nil, &taint)
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
verifyThatTaintIsGone(c, nodeName, &taint)
verifyThatTaintIsGone(ctx, c, nodeName, &taint)
}
// AddOrUpdateTaintOnNode adds the given taint to the given node or updates taint.
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taint v1.Taint) {
// TODO use wrapper methods in expect.go after removing the dependency on this
// package from the core e2e framework.
err := addOrUpdateTaintOnNode(c, nodeName, &taint)
err := addOrUpdateTaintOnNode(ctx, c, nodeName, &taint)
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
}
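// Illustrative sketch (not part of the vendored diff): the taint helpers now take a
// context; a typical test adds a NoSchedule taint, asserts it is present, and removes
// it again on cleanup. The taint key and value here are made up for the example.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func taintForTest(ctx context.Context, c clientset.Interface, nodeName string) func() {
	taint := v1.Taint{Key: "example.com/dedicated", Value: "e2e", Effect: v1.TaintEffectNoSchedule}
	e2enode.AddOrUpdateTaintOnNode(ctx, c, nodeName, taint)
	e2enode.ExpectNodeHasTaint(ctx, c, nodeName, &taint)
	// The returned cleanup function removes the taint again.
	return func() { e2enode.RemoveTaintOffNode(ctx, c, nodeName, taint) }
}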
// addOrUpdateTaintOnNode adds taints to the node. If a taint was added to the node, it'll issue API calls
// to update the node; otherwise, it makes no API calls. Returns an error if any occurs.
// copied from pkg/controller/controller_utils.go AddOrUpdateTaintOnNode()
func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
func addOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
@ -681,10 +619,10 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -705,7 +643,7 @@ func addOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
return patchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}
@ -768,7 +706,7 @@ var semantic = conversion.EqualitiesOrDie(
// won't fail if target taint doesn't exist or has been removed.
// If passed a node, it'll check if there's anything to be done; if the taint is not present it won't issue
// any API calls.
func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error {
if len(taints) == 0 {
return nil
}
@ -793,10 +731,10 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, tain
// First we try getting node from the API server cache, as it's cheaper. If it fails
// we get it from etcd to be sure to have fresh data.
if firstTry {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
firstTry = false
} else {
oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
}
if err != nil {
return err
@ -817,15 +755,15 @@ func removeNodeTaint(c clientset.Interface, nodeName string, node *v1.Node, tain
if !updated {
return nil
}
return patchNodeTaints(c, nodeName, oldNode, newNode)
return patchNodeTaints(ctx, c, nodeName, oldNode, newNode)
})
}
// patchNodeTaints patches node's taints.
func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
}
newTaints := newNode.Spec.Taints
@ -833,15 +771,15 @@ func patchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err
}
@ -877,9 +815,9 @@ func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool)
return newTaints, deleted
}
func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
func verifyThatTaintIsGone(ctx context.Context, c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
nodeUpdated, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())

View File

@ -1,51 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package node
import (
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"
)
const (
// PreconfiguredRuntimeClassHandler is the name of the runtime handler
// that is expected to be preconfigured in the test environment.
PreconfiguredRuntimeClassHandler = "test-handler"
)
// NewRuntimeClassPod returns a test pod with the given runtimeClassName
func NewRuntimeClassPod(runtimeClassName string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("test-runtimeclass-%s-", runtimeClassName),
},
Spec: v1.PodSpec{
RuntimeClassName: &runtimeClassName,
Containers: []v1.Container{{
Name: "test",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"true"},
}},
RestartPolicy: v1.RestartPolicyNever,
AutomountServiceAccountToken: utilpointer.BoolPtr(false),
},
}
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"time"
"k8s.io/apimachinery/pkg/util/wait"
@ -25,7 +26,7 @@ import (
)
// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
func WaitForSSHTunnels(ctx context.Context, namespace string) {
framework.Logf("Waiting for SSH tunnels to establish")
e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
@ -35,7 +36,7 @@ func WaitForSSHTunnels(namespace string) {
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")
// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
wait.PollImmediateWithContext(ctx, 5*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})

View File

@ -40,21 +40,21 @@ var requiredPerNodePods = []*regexp.Regexp{
// WaitForReadyNodes waits up to timeout for the cluster to reach the desired size and
// to contain no not-ready nodes. By cluster size we mean the number of schedulable Nodes.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(c, size, timeout)
func WaitForReadyNodes(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) error {
_, err := CheckReady(ctx, c, size, timeout)
return err
}
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
func WaitForTotalHealthy(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
framework.Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
@ -63,7 +63,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
@ -114,10 +114,10 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
func WaitConditionToBe(ctx context.Context, c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
continue
@ -134,20 +134,37 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(c, name, v1.NodeReady, false, timeout)
func WaitForNodeToBeNotReady(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(ctx, c, name, v1.NodeReady, false, timeout)
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(c, name, v1.NodeReady, true, timeout)
func WaitForNodeToBeReady(ctx context.Context, c clientset.Interface, name string, timeout time.Duration) bool {
return WaitConditionToBe(ctx, c, name, v1.NodeReady, true, timeout)
}
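// WaitForNodeSchedulable waits up to timeout for node name to become schedulable (or
// unschedulable, depending on wantSchedulable) and returns whether it did.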
func WaitForNodeSchedulable(ctx context.Context, c clientset.Interface, name string, timeout time.Duration, wantSchedulable bool) bool {
framework.Logf("Waiting up to %v for node %s to be schedulable: %t", timeout, name, wantSchedulable)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
continue
}
if IsNodeSchedulable(node) == wantSchedulable {
return true
}
}
framework.Logf("Node %s didn't reach desired schedulable status (%t) within %v", name, wantSchedulable, timeout)
return false
}
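// Illustrative sketch (not part of the vendored diff): pairing the new helper with a
// cordon; cordonNode is hypothetical and stands in for however the test flips
// spec.unschedulable on the node.
package example

import (
	"context"
	"fmt"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func cordonAndWait(ctx context.Context, c clientset.Interface, nodeName string, cordonNode func(context.Context, string) error) error {
	if err := cordonNode(ctx, nodeName); err != nil {
		return err
	}
	if !e2enode.WaitForNodeSchedulable(ctx, c, nodeName, 2*time.Minute, false) {
		return fmt.Errorf("node %s is still schedulable after cordoning", nodeName)
	}
	return nil
}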
// CheckReady waits up to timeout for the cluster to reach the desired size and
// to contain no not-ready nodes. By cluster size we mean the number of schedulable Nodes.
func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
continue
@ -172,11 +189,11 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
}
// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
func waitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
if wait.PollImmediateWithContext(ctx, poll, singleCallTimeout, func(ctx context.Context) (bool, error) {
nodes, err = c.CoreV1().Nodes().List(ctx, metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@ -190,8 +207,8 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
}
// checkWaitListSchedulableNodes is a wrapper around listing nodes supporting retries.
func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
nodes, err := waitListSchedulableNodes(c)
func checkWaitListSchedulableNodes(ctx context.Context, c clientset.Interface) (*v1.NodeList, error) {
nodes, err := waitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("error: %s. Non-retryable failure or timed out while listing nodes for e2e cluster", err)
}
@ -199,9 +216,9 @@ func checkWaitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error)
}
// CheckReadyForTests returns a function which will return 'true' once the number of ready nodes is above the allowedNotReadyNodes threshold (i.e. to be used as a global gate for starting the tests).
func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func() (bool, error) {
func CheckReadyForTests(ctx context.Context, c clientset.Interface, nonblockingTaints string, allowedNotReadyNodes, largeClusterThreshold int) func(ctx context.Context) (bool, error) {
attempt := 0
return func() (bool, error) {
return func(ctx context.Context) (bool, error) {
if allowedNotReadyNodes == -1 {
return true, nil
}
@ -212,7 +229,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
// remove uncordoned nodes from our calculation, TODO refactor if node v2 API removes that semantic.
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
allNodes, err := c.CoreV1().Nodes().List(ctx, opts)
if err != nil {
var terminalListNodesErr error
framework.Logf("Unexpected error listing nodes: %v", err)

View File

@ -0,0 +1,9 @@
# This E2E framework sub-package is currently allowed to use arbitrary
# dependencies, therefore we need to override the restrictions from
# the parent .import-restrictions file.
#
# At some point it may become useful to also check this package's
# dependencies more carefully.
rules:
- selectorRegexp: ""
allowedPrefixes: [ "" ]

View File

@ -52,76 +52,76 @@ type Config struct {
}
// CreateUnschedulablePod creates a pod with the given claims based on node selector and waits for it to become Unschedulable.
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Unschedulable: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
// CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
func CreateClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// CreatePod creates a pod with the given claims based on node selector and waits for it to be Running.
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
// CreateSecPod creates security pod with given claims
func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(client, podConfig, timeout)
func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
return CreateSecPodWithNodeSelection(ctx, client, podConfig, timeout)
}
// CreateSecPodWithNodeSelection creates security pod with given claims
func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig)
if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err)
return nil, fmt.Errorf("Unable to create pod: %w", err)
}
pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout)
err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
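// Illustrative sketch (not part of the vendored diff): creating a client pod bound to
// a PVC and tearing it down again, with the caller's context flowing through both the
// create and the delete helpers.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func withClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, use func(*v1.Pod) error) error {
	pod, err := e2epod.CreateClientPod(ctx, c, ns, pvc)
	if err != nil {
		return err
	}
	defer func() {
		// Best-effort cleanup; the error is deliberately ignored in this sketch.
		_ = e2epod.DeletePodWithWait(ctx, c, pod)
	}()
	return use(pod)
}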

View File

@ -37,9 +37,9 @@ const (
// DeletePodOrFail deletes the pod of the specified namespace and name. Resilient to the pod
// not existing.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
func DeletePodOrFail(ctx context.Context, c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
if err != nil && apierrors.IsNotFound(err) {
return
}
@ -49,41 +49,41 @@ func DeletePodOrFail(c clientset.Interface, ns, name string) {
// DeletePodWithWait deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
func DeletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) error {
if pod == nil {
return nil
}
return DeletePodWithWaitByName(c, pod.GetName(), pod.GetNamespace())
return DeletePodWithWaitByName(ctx, c, pod.GetName(), pod.GetNamespace())
}
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error {
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
err := c.CoreV1().Pods(podNamespace).Delete(ctx, podName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
return fmt.Errorf("pod %q was not deleted: %w", podName, err)
}
return nil
}
// DeletePodWithGracePeriod deletes the passed-in pod. Resilient to the pod not existing.
func DeletePodWithGracePeriod(c clientset.Interface, pod *v1.Pod, grace int64) error {
return DeletePodWithGracePeriodByName(c, pod.GetName(), pod.GetNamespace(), grace)
func DeletePodWithGracePeriod(ctx context.Context, c clientset.Interface, pod *v1.Pod, grace int64) error {
return DeletePodWithGracePeriodByName(ctx, c, pod.GetName(), pod.GetNamespace(), grace)
}
// DeletePodsWithGracePeriod deletes the passed-in pods. Resilient to the pods not existing.
func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64) error {
func DeletePodsWithGracePeriod(ctx context.Context, c clientset.Interface, pods []v1.Pod, grace int64) error {
for _, pod := range pods {
if err := DeletePodWithGracePeriod(c, &pod, grace); err != nil {
if err := DeletePodWithGracePeriod(ctx, c, &pod, grace); err != nil {
return err
}
}
@ -91,14 +91,14 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64
}
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface, podName, podNamespace string, grace int64) error {
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
err := c.CoreV1().Pods(podNamespace).Delete(ctx, podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
return nil
}
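
The hunk above only threads a context.Context through the pod delete helpers. As a rough, illustrative sketch of a migrated caller (the package name and the cleanup wrappers below are not part of this diff; the helpers are assumed to live in the e2e framework pod package, like the surrounding files):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// cleanupPod passes the caller's context to the delete helper shown above;
// DeletePodWithWait already tolerates a nil pod and a pod that is gone.
func cleanupPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) error {
	return e2epod.DeletePodWithWait(ctx, c, pod)
}

// cleanupPodByName deletes by name/namespace when only those are known.
func cleanupPodByName(ctx context.Context, c clientset.Interface, ns, name string) error {
	return e2epod.DeletePodWithWaitByName(ctx, c, name, ns)
}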

View File

@ -87,13 +87,13 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err)
return nil, fmt.Errorf("create round tripper: %w", err)
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err)
return nil, fmt.Errorf("dialer failed: %w", err)
}
requestID := "1"
defer func() {
@ -112,7 +112,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
// This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err)
return nil, fmt.Errorf("error creating error stream: %w", err)
}
errorStream.Close()
go func() {
@ -129,7 +129,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err)
return nil, fmt.Errorf("error creating data stream: %w", err)
}
return &stream{
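
The only change in this hunk is error wrapping: fmt.Errorf switches from %v to %w so callers can reach the underlying cause. A generic sketch of why that matters (the dial/isTimeout names are illustrative, not part of the dialer):

package example

import (
	"errors"
	"fmt"
	"net"
)

// dial mirrors the wrapping pattern above: the cause is preserved with %w.
func dial(addr string) (net.Conn, error) {
	conn, err := net.Dial("tcp", addr)
	if err != nil {
		return nil, fmt.Errorf("dialer failed: %w", err)
	}
	return conn, nil
}

// isTimeout shows what %w buys the caller: errors.As can still reach the
// concrete net.Error inside the wrapped error.
func isTimeout(err error) bool {
	var nerr net.Error
	return errors.As(err, &nerr) && nerr.Timeout()
}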

View File

@ -87,6 +87,7 @@ func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, strin
// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and return stdout, stderr and error
func ExecCommandInContainerWithFullOutput(f *framework.Framework, podName, containerName string, cmd ...string) (string, string, error) {
// TODO (pohly): add context support
return ExecWithOptions(f, ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
@ -114,28 +115,28 @@ func ExecShellInContainer(f *framework.Framework, podName, containerName string,
return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd)
}
func execCommandInPod(f *framework.Framework, podName string, cmd ...string) string {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
func execCommandInPod(ctx context.Context, f *framework.Framework, podName string, cmd ...string) string {
pod, err := NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...)
}
func execCommandInPodWithFullOutput(f *framework.Framework, podName string, cmd ...string) (string, string, error) {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
func execCommandInPodWithFullOutput(ctx context.Context, f *framework.Framework, podName string, cmd ...string) (string, string, error) {
pod, err := NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return ExecCommandInContainerWithFullOutput(f, podName, pod.Spec.Containers[0].Name, cmd...)
}
// ExecShellInPod executes the specified command on the pod.
func ExecShellInPod(f *framework.Framework, podName string, cmd string) string {
return execCommandInPod(f, podName, "/bin/sh", "-c", cmd)
func ExecShellInPod(ctx context.Context, f *framework.Framework, podName string, cmd string) string {
return execCommandInPod(ctx, f, podName, "/bin/sh", "-c", cmd)
}
// ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error.
func ExecShellInPodWithFullOutput(f *framework.Framework, podName string, cmd string) (string, string, error) {
return execCommandInPodWithFullOutput(f, podName, "/bin/sh", "-c", cmd)
func ExecShellInPodWithFullOutput(ctx context.Context, f *framework.Framework, podName string, cmd string) (string, string, error) {
return execCommandInPodWithFullOutput(ctx, f, podName, "/bin/sh", "-c", cmd)
}
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
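
ExecShellInPod and ExecShellInPodWithFullOutput now take a context as their first argument. A minimal sketch of a caller, assuming these exec helpers live in the same e2e framework pod package as NewPodClient (the listTmp helper is illustrative only):

package example

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// listTmp exercises the context-aware exec variants shown above;
// f must be an already set-up e2e framework instance.
func listTmp(ctx context.Context, f *framework.Framework, podName string) (string, string, error) {
	// Fails the test on error and returns stdout only.
	stdout := e2epod.ExecShellInPod(ctx, f, podName, "ls /tmp")
	framework.Logf("ls /tmp: %s", stdout)
	// Returns stdout, stderr and the error to the caller instead.
	return e2epod.ExecShellInPodWithFullOutput(ctx, f, podName, "ls /tmp")
}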

31
vendor/k8s.io/kubernetes/test/e2e/framework/pod/get.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
// Get creates a function which retrieves the pod anew each time the function
// is called. Fatal errors are detected by framework.HandleRetry and cause
// polling to stop.
func Get(c clientset.Interface, pod framework.NamedObject) framework.GetFunc[*v1.Pod] {
return framework.HandleRetry(framework.GetObject(c.CoreV1().Pods(pod.GetNamespace()).Get, pod.GetName(), metav1.GetOptions{}))
}
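
The new Get helper returns a framework.GetFunc[*v1.Pod] meant for retry-aware polling. A sketch of direct use, assuming GetFunc is callable as func(ctx) (*v1.Pod, error) and that *v1.Pod satisfies framework.NamedObject through its GetName/GetNamespace accessors (neither is shown in this diff):

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// reportPhase re-fetches a pod through the retry-aware getter added above
// and prints its current phase.
func reportPhase(ctx context.Context, c clientset.Interface, pod *v1.Pod) error {
	get := e2epod.Get(c, pod)
	latest, err := get(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("pod %s/%s is in phase %s\n", latest.Namespace, latest.Name, latest.Status.Phase)
	return nil
}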

View File

@ -107,7 +107,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
return out, fmt.Errorf("RunHostCmd still failed after %v: %w", elapsed, err)
}
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
@ -121,6 +121,15 @@ func LookForStringInLog(ns, podName, container, expectedString string, timeout t
})
}
// LookForStringInLogWithoutKubectl looks for the given string in the log of a specific pod container
func LookForStringInLogWithoutKubectl(ctx context.Context, client clientset.Interface, ns string, podName string, container string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
podLogs, err := e2epod.GetPodLogs(ctx, client, ns, podName, container)
framework.ExpectNoError(err)
return podLogs
})
}
// CreateEmptyFileOnPod creates empty file at given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
@ -128,8 +137,8 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
}
// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
func DumpDebugInfo(ctx context.Context, c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name)
framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
@ -142,6 +151,7 @@ func DumpDebugInfo(c clientset.Interface, ns string) {
// MatchContainerOutput creates a pod and waits for all its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func MatchContainerOutput(
ctx context.Context,
f *framework.Framework,
pod *v1.Pod,
containerName string,
@ -153,25 +163,25 @@ func MatchContainerOutput(
}
podClient := e2epod.PodClientNS(f, ns)
createdPod := podClient.Create(pod)
createdPod := podClient.Create(ctx, pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
return fmt.Errorf("failed to get pod status: %w", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
_ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool {
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, c.Name)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
@ -187,18 +197,18 @@ func MatchContainerOutput(
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
@ -210,21 +220,21 @@ func MatchContainerOutput(
// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func TestContainerOutput(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
func TestContainerOutput(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func TestContainerOutputRegexp(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
func TestContainerOutputRegexp(ctx context.Context, f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(ctx, f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}
// TestContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func TestContainerOutputMatcher(f *framework.Framework,
func TestContainerOutputMatcher(ctx context.Context, f *framework.Framework,
scenarioName string,
pod *v1.Pod,
containerIndex int,
@ -234,5 +244,5 @@ func TestContainerOutputMatcher(f *framework.Framework,
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
framework.Failf("Invalid container index: %d", containerIndex)
}
framework.ExpectNoError(MatchContainerOutput(f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
framework.ExpectNoError(MatchContainerOutput(ctx, f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
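
TestContainerOutput, TestContainerOutputRegexp and TestContainerOutputMatcher now take a context as their first argument. A sketch of a caller, assuming these helpers sit in the e2e pod output utility package (the import path and the busybox pod below are illustrative, not part of this diff):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" // assumed path
)

// checkEcho runs a single-container pod and asserts on its log output using
// the context-aware TestContainerOutput from the hunk above.
func checkEcho(ctx context.Context, f *framework.Framework) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "echo-test"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "echo",
				Image:   "busybox",
				Command: []string{"sh", "-c", "echo hello-world"},
			}},
		},
	}
	e2eoutput.TestContainerOutput(ctx, f, "echo scenario", pod, 0, []string{"hello-world"})
}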

View File

@ -27,7 +27,6 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
@ -93,26 +92,26 @@ type PodClient struct {
}
// Create creates a new pod according to the framework specifications (don't wait for it to start).
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{})
p, err := c.PodInterface.Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating Pod")
return p
}
// CreateSync creates a new pod according to the framework specifications, and waits for it to start and be running and ready.
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
namespace := c.f.Namespace.Name
p := c.Create(pod)
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
p := c.Create(ctx, pod)
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
// Get the newest pod after it becomes running and ready; some status fields, such as the pod IP, may change after the pod is created.
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
p, err := c.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return p
}
// CreateBatch creates a batch of pods. All pods are created before waiting.
func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
func (c *PodClient) CreateBatch(ctx context.Context, pods []*v1.Pod) []*v1.Pod {
ps := make([]*v1.Pod, len(pods))
var wg sync.WaitGroup
for i, pod := range pods {
@ -120,7 +119,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
go func(i int, pod *v1.Pod) {
defer wg.Done()
defer ginkgo.GinkgoRecover()
ps[i] = c.CreateSync(pod)
ps[i] = c.CreateSync(ctx, pod)
}(i, pod)
}
wg.Wait()
@ -130,14 +129,14 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
// Update updates the pod object. It retries on conflict and returns an error for
// any other API error. name is the pod name; updateFn is the function that
// updates the pod object.
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *v1.Pod)) {
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
}
updateFn(pod)
_, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{})
_, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{})
if err == nil {
framework.Logf("Successfully updated pod %q", name)
return true, nil
@ -146,12 +145,12 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
return false, fmt.Errorf("failed to update pod %q: %w", name, err)
}))
}
// AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
func (c *PodClient) AddEphemeralContainerSync(ctx context.Context, pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
namespace := c.f.Namespace.Name
podJS, err := json.Marshal(pod)
@ -166,24 +165,23 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
if _, err := c.Patch(ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
return err
}
framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}
// DeleteSync deletes the pod and waits for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeout time.Duration) {
func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.DeleteOptions, timeout time.Duration) {
namespace := c.f.Namespace.Name
err := c.Delete(context.TODO(), name, options)
err := c.Delete(ctx, name, options)
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, namespace, timeout), "wait for pod %q to disappear", name)
}
// mungeSpec applies test-suite-specific transformations to the pod spec.
@ -224,9 +222,9 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// WaitForSuccess waits for pod to succeed.
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
func (c *PodClient) WaitForSuccess(ctx context.Context, name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -241,9 +239,9 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
}
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -258,12 +256,12 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
}
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
return false, fmt.Errorf("error in listing events: %w", err)
}
for _, e := range evnts.Items {
switch e.Reason {
@ -282,15 +280,15 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
}
// MatchContainerOutput gets the output of a container and matches the expected regexp against it.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
output, err := GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}
regex, err := regexp.Compile(expectedRegexp)
if err != nil {
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
return fmt.Errorf("failed to compile regexp %q: %w", expectedRegexp, err)
}
if !regex.MatchString(output) {
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)
@ -299,16 +297,16 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
}
// PodIsReady returns true if the specified pod is ready. Otherwise false.
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
func (c *PodClient) PodIsReady(ctx context.Context, name string) bool {
pod, err := c.Get(ctx, name, metav1.GetOptions{})
framework.ExpectNoError(err)
return podutils.IsPodReady(pod)
}
// RemoveFinalizer removes the pod's finalizer
func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) {
func (c *PodClient) RemoveFinalizer(ctx context.Context, podName string, finalizerName string) {
framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName)
c.Update(podName, func(pod *v1.Pod) {
c.Update(ctx, podName, func(pod *v1.Pod) {
pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil)
})
}
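
Every PodClient method above now takes a context as its first argument. A short sketch of the typical create/wait/delete flow with the migrated signatures (the runToCompletion wrapper is illustrative, not part of this diff):

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runToCompletion creates a pod, waits for it to finish (success or failure),
// and cleans it up, using the context-aware PodClient methods above.
func runToCompletion(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
	client := e2epod.NewPodClient(f)
	created := client.Create(ctx, pod)
	client.WaitForFinish(ctx, created.Name, 5*time.Minute)
	client.DeleteSync(ctx, created.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}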

View File

@ -18,7 +18,6 @@ package pod
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -31,7 +30,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@ -40,14 +38,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)
// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))
// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failure state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))
// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched.
@ -69,109 +59,20 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
// NewProxyResponseChecker returns a context for checking pods responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(context.TODO(), options)
expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
ctx, cancel := context.WithTimeout(context.Background(), singleCallTimeout)
defer cancel()
body, err := r.c.CoreV1().RESTClient().Get().
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do(ctx).
Raw()
if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// PodsCreated returns a pod list matched by the given name.
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
return PodsCreatedByLabel(ctx, c, ns, name, replicas, label)
}
// PodsCreatedByLabel returns a created pod list matched by the given label.
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
func PodsCreatedByLabel(ctx context.Context, c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
return nil, err
}
@ -194,34 +95,31 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
}
// VerifyPods checks if the specified pod is responding.
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
func VerifyPods(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, true)
}
// VerifyPodsRunning checks if the specified pod is running.
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
func VerifyPodsRunning(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(ctx, c, ns, name, wantName, replicas, false)
}
func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(c, ns, name, replicas)
func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(ctx, c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
e := podsRunning(ctx, c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
}
return nil
}
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
func podsRunning(ctx context.Context, c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
ginkgo.By("ensuring each pod is running")
@ -230,7 +128,7 @@ func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
for _, pod := range pods.Items {
go func(p v1.Pod) {
errorChan <- WaitForPodRunningInNamespace(c, &p)
errorChan <- WaitForPodRunningInNamespace(ctx, c, &p)
}(pod)
}
@ -302,7 +200,7 @@ func logPodTerminationMessages(pods []v1.Pod) {
// We will log the Pods that have the LabelLogOnPodFailure label. If there aren't any, we default to
// logging only the first 5 Pods. This requires the reportDir to be set, and the pods are logged into:
// {report_dir}/pods/{namespace}/{pod}/{container_name}/logs.txt
func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
func logPodLogs(ctx context.Context, c clientset.Interface, namespace string, pods []v1.Pod, reportDir string) {
if reportDir == "" {
return
}
@ -328,7 +226,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
for i := 0; i < maxPods; i++ {
pod := logPods[i]
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
logs, err := getPodLogsInternal(ctx, c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
@ -351,14 +249,14 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
}
// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
func DumpAllPodInfoForNamespace(ctx context.Context, c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
framework.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
logPodLogs(c, namespace, pods.Items, reportDir)
logPodLogs(ctx, c, namespace, pods.Items, reportDir)
}
// FilterNonRestartablePods filters out pods that will never get recreated if
@ -459,15 +357,15 @@ func newExecPodSpec(ns, generateName string) *v1.Pod {
// CreateExecPodOrFail creates an agnhost pause pod used as a vessel for kubectl exec commands.
// Pod name is uniquely generated.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
func CreateExecPodOrFail(ctx context.Context, client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
framework.Logf("Creating new exec pod")
pod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(pod)
}
execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
execPod, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
err = WaitForPodNameRunningInNamespace(client, execPod.Name, execPod.Namespace)
err = WaitForPodNameRunningInNamespace(ctx, client, execPod.Name, execPod.Namespace)
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
return execPod
}
@ -497,20 +395,20 @@ func WithWindowsHostProcess(pod *v1.Pod, username string) {
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
func CheckPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(ctx, c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded, using
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
func CheckPodsRunningReadyOrSucceeded(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return checkPodsCondition(ctx, c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// checkPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
func checkPodsCondition(ctx context.Context, c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
@ -521,7 +419,7 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
for _, podName := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
err := WaitForPodCondition(ctx, c, ns, name, desc, timeout, condition)
result <- waitPodResult{err == nil, name}
}(podName)
}
@ -539,24 +437,24 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
}
// GetPodLogs returns the logs of the specified container (namespace/pod/container).
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false, nil, nil)
func GetPodLogs(ctx context.Context, c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(ctx, c, namespace, podName, containerName, false, nil, nil)
}
// GetPodLogsSince returns the logs of the specified container (namespace/pod/container) since a timestamp.
func GetPodLogsSince(c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
func GetPodLogsSince(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, since time.Time) (string, error) {
sinceTime := metav1.NewTime(since)
return getPodLogsInternal(c, namespace, podName, containerName, false, &sinceTime, nil)
return getPodLogsInternal(ctx, c, namespace, podName, containerName, false, &sinceTime, nil)
}
// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true, nil, nil)
func GetPreviousPodLogs(ctx context.Context, c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(ctx, c, namespace, podName, containerName, true, nil, nil)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
func getPodLogsInternal(ctx context.Context, c clientset.Interface, namespace, podName, containerName string, previous bool, sinceTime *metav1.Time, tailLines *int) (string, error) {
request := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
@ -569,7 +467,7 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
if tailLines != nil {
request.Param("tailLines", strconv.Itoa(*tailLines))
}
logs, err := request.Do(context.TODO()).Raw()
logs, err := request.Do(ctx).Raw()
if err != nil {
return "", err
}
@ -580,8 +478,8 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
}
// GetPodsInNamespace returns the pods in the given namespace.
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
func GetPodsInNamespace(ctx context.Context, c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
@ -598,10 +496,10 @@ func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[strin
}
// GetPods returns the pods matching the given labels in the given ns
func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) {
func GetPods(ctx context.Context, c clientset.Interface, ns string, matchLabels map[string]string) ([]v1.Pod, error) {
label := labels.SelectorFromSet(matchLabels)
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), listOpts)
pods, err := c.CoreV1().Pods(ns).List(ctx, listOpts)
if err != nil {
return []v1.Pod{}, err
}
@ -609,13 +507,13 @@ func GetPods(c clientset.Interface, ns string, matchLabels map[string]string) ([
}
// GetPodSecretUpdateTimeout returns the timeout duration for updating pod secret.
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
func GetPodSecretUpdateTimeout(ctx context.Context, c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
secretTTL, err := getNodeTTLAnnotationValue(ctx, c)
if err != nil {
framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
@ -624,18 +522,18 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
}
// VerifyPodHasConditionWithType verifies the pod has the expected condition by type
func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
if condition := FindPodConditionByType(&pod.Status, cType); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status)
}
}
func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
}
// Since TTL the kubelet is using is stored in node object, for the timeout
// purpose we take it from the first node (all of them should be the same).
@ -674,15 +572,3 @@ func IsPodActive(p *v1.Pod) bool {
v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil
}
func podIdentifier(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func identifier(pod *v1.Pod) string {
id := podIdentifier(pod.Namespace, pod.Name)
if pod.UID != "" {
id += fmt.Sprintf("(%s)", pod.UID)
}
return id
}
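
The helpers in this file (PodsCreated, VerifyPodsRunning, GetPodLogs, and friends) now all take a context. A sketch of a migrated caller combining a few of them (the logsOfRunningReplicas wrapper is illustrative only):

package example

import (
	"context"
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// logsOfRunningReplicas waits for `replicas` pods created under `name` to be
// running, then fetches the logs of the given container from each of them.
func logsOfRunningReplicas(ctx context.Context, c clientset.Interface, ns, name, container string, replicas int32) error {
	if err := e2epod.VerifyPodsRunning(ctx, c, ns, name, false, replicas); err != nil {
		return err
	}
	pods, err := e2epod.PodsCreated(ctx, c, ns, name, replicas)
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		logs, err := e2epod.GetPodLogs(ctx, c, ns, p.Name, container)
		if err != nil {
			return fmt.Errorf("logs for %s: %w", p.Name, err)
		}
		fmt.Println(logs)
	}
	return nil
}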

View File

@ -141,14 +141,14 @@ const DefaultNonRootUserName = "ContainerUser"
// Tests that require a specific user ID should override this.
func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
psc := &v1.PodSecurityContext{
RunAsNonRoot: pointer.BoolPtr(true),
RunAsNonRoot: pointer.Bool(true),
RunAsUser: GetDefaultNonRootUser(),
SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
}
if NodeOSDistroIs("windows") {
psc.WindowsOptions = &v1.WindowsSecurityContextOptions{}
psc.WindowsOptions.RunAsUserName = pointer.StringPtr(DefaultNonRootUserName)
psc.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName)
}
return psc
@ -157,7 +157,7 @@ func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
// GetRestrictedContainerSecurityContext returns a minimal restricted container security context.
func GetRestrictedContainerSecurityContext() *v1.SecurityContext {
return &v1.SecurityContext{
AllowPrivilegeEscalation: pointer.BoolPtr(false),
AllowPrivilegeEscalation: pointer.Bool(false),
Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
}
}
@ -181,7 +181,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error {
pod.Spec.SecurityContext = GetRestrictedPodSecurityContext()
} else {
if pod.Spec.SecurityContext.RunAsNonRoot == nil {
pod.Spec.SecurityContext.RunAsNonRoot = pointer.BoolPtr(true)
pod.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true)
}
if pod.Spec.SecurityContext.RunAsUser == nil {
pod.Spec.SecurityContext.RunAsUser = GetDefaultNonRootUser()
@ -191,7 +191,7 @@ func MixinRestrictedPodSecurity(pod *v1.Pod) error {
}
if NodeOSDistroIs("windows") && pod.Spec.SecurityContext.WindowsOptions == nil {
pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{}
pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.StringPtr(DefaultNonRootUserName)
pod.Spec.SecurityContext.WindowsOptions.RunAsUserName = pointer.String(DefaultNonRootUserName)
}
}
for i := range pod.Spec.Containers {
@ -241,3 +241,23 @@ func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditi
}
return nil
}
// FindContainerStatusInPod finds a container status by its name in the provided pod
func FindContainerStatusInPod(pod *v1.Pod, containerName string) *v1.ContainerStatus {
for _, containerStatus := range pod.Status.InitContainerStatuses {
if containerStatus.Name == containerName {
return &containerStatus
}
}
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.Name == containerName {
return &containerStatus
}
}
for _, containerStatus := range pod.Status.EphemeralContainerStatuses {
if containerStatus.Name == containerName {
return &containerStatus
}
}
return nil
}
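
This hunk migrates from the deprecated pointer.BoolPtr/StringPtr helpers to pointer.Bool/String and adds FindContainerStatusInPod. A small sketch using the non-deprecated helpers, assuming k8s.io/utils/pointer (the container name and image are placeholders):

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

// restrictedContainer builds a container using the pointer helpers this hunk
// migrates to (pointer.Bool replacing pointer.BoolPtr).
func restrictedContainer(name, image string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: image,
		SecurityContext: &v1.SecurityContext{
			AllowPrivilegeEscalation: pointer.Bool(false),
			RunAsNonRoot:             pointer.Bool(true),
			Capabilities:             &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
		},
	}
}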

File diff suppressed because it is too large

View File

@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"os"
"sync"
@ -100,12 +101,12 @@ type ProviderInterface interface {
CreateShare() (string, string, string, error)
DeleteShare(accountName, shareName string) error
CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error)
DeletePVSource(pvSource *v1.PersistentVolumeSource) error
CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error)
DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error
CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string)
CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string)
EnsureLoadBalancerResourcesDeleted(ip, portRange string) error
EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error
LoadBalancerSrcRanges() []string
EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service))
}
@ -159,21 +160,21 @@ func (n NullProvider) DeletePD(pdName string) error {
}
// CreatePVSource is a base implementation which creates PV source.
func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
func (n NullProvider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) {
return nil, fmt.Errorf("Provider not supported")
}
// DeletePVSource is a base implementation which deletes PV source.
func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
func (n NullProvider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error {
return fmt.Errorf("Provider not supported")
}
// CleanupServiceResources is a base implementation which cleans up service resources.
func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
func (n NullProvider) CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) {
}
// EnsureLoadBalancerResourcesDeleted is a base implementation which ensures load balancer is deleted.
func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error {
return nil
}
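
ProviderInterface and NullProvider now expose context-aware hooks. A purely illustrative sketch of a custom provider embedding NullProvider and overriding one hook, assuming (as the hunk above suggests) that NullProvider's methods use value receivers:

package example

import (
	"context"

	"k8s.io/kubernetes/test/e2e/framework"
)

// loggingProvider embeds the framework's NullProvider and overrides one of
// the now context-aware hooks.
type loggingProvider struct {
	framework.NullProvider
}

func (p loggingProvider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error {
	framework.Logf("skipping load balancer cleanup for %s (%s)", ip, portRange)
	return nil
}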

Some files were not shown because too many files have changed in this diff