mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)

rebase: update kubernetes to 1.26.1

Update kubernetes and its dependencies to v1.26.1.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Committed by: mergify[bot]
Parent: e9e33fb851
Commit: 9c8de9471e

vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go (generated, vendored): 6 lines changed
@@ -61,7 +61,7 @@ const (
 )
 
 // AllContainers specifies that all containers be visited
-const AllContainers ContainerType = (InitContainers | Containers | EphemeralContainers)
+const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers
 
 // AllFeatureEnabledContainers returns a ContainerType mask which includes all container
 // types except for the ones guarded by feature gate.
@@ -188,6 +188,7 @@ func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
 	return true
 }
 
+// visitContainerSecretNames returns true unless the visitor returned false when invoked with a secret reference
 func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
 	for _, env := range container.EnvFrom {
 		if env.SecretRef != nil {
@@ -236,6 +237,7 @@ func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
 	return true
 }
 
+// visitContainerConfigmapNames returns true unless the visitor returned false when invoked with a configmap reference
 func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool {
 	for _, env := range container.EnvFrom {
 		if env.ConfigMapRef != nil {
@@ -300,7 +302,7 @@ func IsPodTerminal(pod *v1.Pod) bool {
 	return IsPodPhaseTerminal(pod.Status.Phase)
 }
 
-// IsPhaseTerminal returns true if the pod's phase is terminal.
+// IsPodPhaseTerminal returns true if the pod's phase is terminal.
 func IsPodPhaseTerminal(phase v1.PodPhase) bool {
 	return phase == v1.PodFailed || phase == v1.PodSucceeded
 }

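For context on the AllContainers change above: ContainerType is a bit mask, so dropping the parentheses around the OR of the three flags does not change the resulting value. A minimal, self-contained sketch (local stand-in declarations, not the real k8s.io/kubernetes/pkg/api/v1/pod package) of how such a mask is built and queried:

package main

import "fmt"

// Stand-ins for the ContainerType flags from pkg/api/v1/pod/util.go (illustrative only).
type ContainerType int

const (
	Containers ContainerType = 1 << iota
	InitContainers
	EphemeralContainers
)

// With or without the parentheses removed in this diff, the OR yields the same mask.
const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers

func main() {
	fmt.Println(AllContainers&InitContainers != 0)      // true: init containers are visited
	fmt.Println(AllContainers&EphemeralContainers != 0) // true: ephemeral containers are visited
}
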
vendor/k8s.io/kubernetes/pkg/apis/apps/types.go (generated, vendored): 31 lines changed
@@ -143,6 +143,21 @@ type StatefulSetPersistentVolumeClaimRetentionPolicy struct {
 	WhenScaled PersistentVolumeClaimRetentionPolicyType
 }
 
+// StatefulSetOrdinals describes the policy used for replica ordinal assignment
+// in this StatefulSet.
+type StatefulSetOrdinals struct {
+	// start is the number representing the first replica's index. It may be used
+	// to number replicas from an alternate index (eg: 1-indexed) over the default
+	// 0-indexed names, or to orchestrate progressive movement of replicas from
+	// one StatefulSet to another.
+	// If set, replica indices will be in the range:
+	// [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).
+	// If unset, defaults to 0. Replica indices will be in the range:
+	// [0, .spec.replicas).
+	// +optional
+	Start int32
+}
+
 // A StatefulSetSpec is the specification of a StatefulSet.
 type StatefulSetSpec struct {
 	// Replicas is the desired number of replicas of the given Template.
@@ -162,7 +177,9 @@ type StatefulSetSpec struct {
 	// Template is the object that describes the pod that will be created if
 	// insufficient replicas are detected. Each pod stamped out by the StatefulSet
 	// will fulfill this Template, but have a unique identity from the rest
-	// of the StatefulSet.
+	// of the StatefulSet. Each pod will be named with the format
+	// <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
+	// "web" with index number "3" would be named "web-3".
 	Template api.PodTemplateSpec
 
 	// VolumeClaimTemplates is a list of claims that pods are allowed to reference.
@@ -215,6 +232,14 @@ type StatefulSetSpec struct {
 	// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
 	// +optional
 	PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy
+
+	// ordinals controls the numbering of replica indices in a StatefulSet. The
+	// default ordinals behavior assigns a "0" index to the first replica and
+	// increments the index by one for each additional replica requested. Using
+	// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
+	// enabled, which is alpha.
+	// +optional
+	Ordinals *StatefulSetOrdinals
 }
 
 // StatefulSetStatus represents the current state of a StatefulSet.
@@ -342,9 +367,7 @@ type Deployment struct {
 
 // DeploymentSpec specifies the state of a Deployment.
 type DeploymentSpec struct {
-	// Number of desired pods. This is a pointer to distinguish between explicit
-	// zero and not specified. Defaults to 1.
-	// +optional
+	// Number of desired pods.
 	Replicas int32
 
 	// Label selector for pods. Existing ReplicaSets whose pods are

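The new Ordinals field shifts the first replica index away from the default 0. A small self-contained sketch (stand-in structs, not the real k8s.io/kubernetes/pkg/apis/apps package) of how the start ordinal maps to pod names, following the naming rule documented in the comments above:

package main

import "fmt"

// Simplified stand-ins for the fields added in this diff (illustrative only).
type StatefulSetOrdinals struct{ Start int32 }

type StatefulSetSpec struct {
	Replicas int32
	Ordinals *StatefulSetOrdinals
}

// podNames follows the documented pattern <statefulsetname>-<podindex>, with
// indices in [start, start+replicas) when ordinals is set, else [0, replicas).
func podNames(name string, spec StatefulSetSpec) []string {
	start := int32(0)
	if spec.Ordinals != nil {
		start = spec.Ordinals.Start
	}
	names := make([]string, 0, spec.Replicas)
	for i := start; i < start+spec.Replicas; i++ {
		names = append(names, fmt.Sprintf("%s-%d", name, i))
	}
	return names
}

func main() {
	spec := StatefulSetSpec{Replicas: 3, Ordinals: &StatefulSetOrdinals{Start: 1}}
	fmt.Println(podNames("web", spec)) // [web-1 web-2 web-3]
}
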
vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go (generated, vendored): 21 lines changed
@@ -718,6 +718,22 @@ func (in *StatefulSetList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatefulSetOrdinals) DeepCopyInto(out *StatefulSetOrdinals) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetOrdinals.
+func (in *StatefulSetOrdinals) DeepCopy() *StatefulSetOrdinals {
+	if in == nil {
+		return nil
+	}
+	out := new(StatefulSetOrdinals)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *StatefulSetPersistentVolumeClaimRetentionPolicy) DeepCopyInto(out *StatefulSetPersistentVolumeClaimRetentionPolicy) {
 	*out = *in
@@ -761,6 +777,11 @@ func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
 		*out = new(StatefulSetPersistentVolumeClaimRetentionPolicy)
 		**out = **in
 	}
+	if in.Ordinals != nil {
+		in, out := &in.Ordinals, &out.Ordinals
+		*out = new(StatefulSetOrdinals)
+		**out = **in
+	}
 	return
 }

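The generated DeepCopy for StatefulSetOrdinals is a plain value copy, and StatefulSetSpec's deepcopy now allocates a new Ordinals pointer so the copy does not alias the original. A minimal sketch (stand-in types mirroring the generated pattern above) of why that matters:

package main

import "fmt"

// Stand-ins mirroring the generated deepcopy pattern in this diff (illustrative only).
type StatefulSetOrdinals struct{ Start int32 }

type StatefulSetSpec struct{ Ordinals *StatefulSetOrdinals }

// DeepCopyInto allocates a new StatefulSetOrdinals so the copy owns its own
// pointer instead of sharing the original's, just like the generated code.
func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) {
	*out = *in
	if in.Ordinals != nil {
		out.Ordinals = new(StatefulSetOrdinals)
		*out.Ordinals = *in.Ordinals
	}
}

func main() {
	orig := StatefulSetSpec{Ordinals: &StatefulSetOrdinals{Start: 1}}
	var cp StatefulSetSpec
	orig.DeepCopyInto(&cp)
	cp.Ordinals.Start = 5
	fmt.Println(orig.Ordinals.Start, cp.Ordinals.Start) // 1 5: the copy is independent
}
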
vendor/k8s.io/kubernetes/pkg/apis/batch/types.go (generated, vendored): 17 lines changed
@@ -24,10 +24,13 @@ import (
 
 // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
 // being deleted before being accounted in the Job status.
-// The apiserver and job controller use this string as a Job annotation, to
-// mark Jobs that are being tracked using pod finalizers. Two releases after
-// the JobTrackingWithFinalizers graduates to GA, JobTrackingFinalizer will
-// no longer be used as a Job annotation.
+//
+// Additionally, the apiserver and job controller use this string as a Job
+// annotation, to mark Jobs that are being tracked using pod finalizers.
+// However, this behavior is deprecated in kubernetes 1.26. This means that, in
+// 1.27+, one release after JobTrackingWithFinalizers graduates to GA, the
+// apiserver and job controller will ignore this annotation and they will
+// always track jobs using finalizers.
 const JobTrackingFinalizer = "batch.kubernetes.io/job-tracking"
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -405,9 +408,6 @@ type JobStatus struct {
 	// (3) Remove the pod UID from the array while increasing the corresponding
 	// counter.
 	//
-	// This field is beta-level. The job controller only makes use of this field
-	// when the feature gate JobTrackingWithFinalizers is enabled (enabled
-	// by default).
 	// Old jobs might not be tracked using this field, in which case the field
 	// remains null.
 	// +optional
@@ -440,8 +440,7 @@ const (
 	// JobFailed means the job has failed its execution.
 	JobFailed JobConditionType = "Failed"
 	// FailureTarget means the job is about to fail its execution.
-	// The constant is to be renamed once the name is accepted within the KEP-3329.
-	AlphaNoCompatGuaranteeJobFailureTarget JobConditionType = "FailureTarget"
+	JobFailureTarget JobConditionType = "FailureTarget"
 )
 
 // JobCondition describes current state of a job.

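The rename above drops the AlphaNoCompatGuarantee prefix, so callers now look for the stable JobFailureTarget condition name. A hedged sketch (stand-in types only, not the real k8s.io/api/batch/v1 package) of checking a job's conditions for this value:

package main

import "fmt"

// Stand-ins mirroring the constants and shapes in this diff (illustrative only).
type JobConditionType string

const (
	JobFailed        JobConditionType = "Failed"
	JobFailureTarget JobConditionType = "FailureTarget"
)

type JobCondition struct {
	Type   JobConditionType
	Status string
}

// isAboutToFail reports whether the job controller has marked the job as a
// failure target, i.e. it is about to fail its execution.
func isAboutToFail(conds []JobCondition) bool {
	for _, c := range conds {
		if c.Type == JobFailureTarget && c.Status == "True" {
			return true
		}
	}
	return false
}

func main() {
	conds := []JobCondition{{Type: JobFailureTarget, Status: "True"}}
	fmt.Println(isAboutToFail(conds)) // true
}
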
vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go (generated, vendored): 2 lines changed
@@ -175,7 +175,7 @@ func IsExtendedResourceName(name core.ResourceName) bool {
 	}
 	// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
 	nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name))
-	if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
+	if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
 		return false
 	}
 	return true

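The only change above drops a redundant string conversion: nameForQuota is already a string. For background, the check builds a quota-style resource name by prefixing the extended resource name and then validates it as a qualified name. A tiny sketch, under the assumption that the requests prefix constant is "requests." (its value is not shown in this diff):

package main

import "fmt"

// Assumed value of core.DefaultResourceRequestsPrefix; not shown in this diff.
const defaultResourceRequestsPrefix = "requests."

func main() {
	name := "example.com/gpu" // a typical extended resource name
	nameForQuota := fmt.Sprintf("%s%s", defaultResourceRequestsPrefix, name)
	fmt.Println(nameForQuota) // requests.example.com/gpu, which is then checked by IsQualifiedName
}
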
vendor/k8s.io/kubernetes/pkg/apis/core/types.go (generated, vendored): 166 lines changed
@@ -291,7 +291,7 @@ type PersistentVolume struct {
 	// +optional
 	metav1.ObjectMeta
 
-	//Spec defines a persistent volume owned by the cluster
+	// Spec defines a persistent volume owned by the cluster
 	// +optional
 	Spec PersistentVolumeSpec
 
@@ -452,29 +452,54 @@ type PersistentVolumeClaimSpec struct {
 	// * An existing PVC (PersistentVolumeClaim)
 	// If the provisioner or an external controller can support the specified data source,
 	// it will create a new volume based on the contents of the specified data source.
-	// If the AnyVolumeDataSource feature gate is enabled, this field will always have
-	// the same contents as the DataSourceRef field.
+	// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+	// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+	// If the namespace is specified, then dataSourceRef will not be copied to dataSource.
 	// +optional
 	DataSource *TypedLocalObjectReference
 	// Specifies the object from which to populate the volume with data, if a non-empty
-	// volume is desired. This may be any local object from a non-empty API group (non
+	// volume is desired. This may be any object from a non-empty API group (non
 	// core object) or a PersistentVolumeClaim object.
 	// When this field is specified, volume binding will only succeed if the type of
 	// the specified object matches some installed volume populator or dynamic
 	// provisioner.
-	// This field will replace the functionality of the DataSource field and as such
+	// This field will replace the functionality of the dataSource field and as such
 	// if both fields are non-empty, they must have the same value. For backwards
-	// compatibility, both fields (DataSource and DataSourceRef) will be set to the same
+	// compatibility, when namespace isn't specified in dataSourceRef,
+	// both fields (dataSource and dataSourceRef) will be set to the same
 	// value automatically if one of them is empty and the other is non-empty.
-	// There are two important differences between DataSource and DataSourceRef:
-	// * While DataSource only allows two specific types of objects, DataSourceRef
+	// When namespace is specified in dataSourceRef,
+	// dataSource isn't set to the same value and must be empty.
+	// There are three important differences between dataSource and dataSourceRef:
+	// * While dataSource only allows two specific types of objects, dataSourceRef
 	//   allows any non-core object, as well as PersistentVolumeClaim objects.
-	// * While DataSource ignores disallowed values (dropping them), DataSourceRef
+	// * While dataSource ignores disallowed values (dropping them), dataSourceRef
 	//   preserves all values, and generates an error if a disallowed value is
 	//   specified.
+	// * While dataSource only allows local objects, dataSourceRef allows objects
+	//   in any namespaces.
 	// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+	// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
 	// +optional
-	DataSourceRef *TypedLocalObjectReference
+	DataSourceRef *TypedObjectReference
 }
 
+type TypedObjectReference struct {
+	// APIGroup is the group for the resource being referenced.
+	// If APIGroup is not specified, the specified Kind must be in the core API group.
+	// For any other third-party types, APIGroup is required.
+	// +optional
+	APIGroup *string
+	// Kind is the type of resource being referenced
+	Kind string
+	// Name is the name of resource being referenced
+	Name string
+	// Namespace is the namespace of resource being referenced
+	// Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+	// (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+	// +featureGate=CrossNamespaceVolumeDataSource
+	// +optional
+	Namespace *string
+}
+
 // PersistentVolumeClaimConditionType defines the condition of PV claim.

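The comments above describe the new copy rules between dataSource and dataSourceRef: when dataSourceRef has no namespace the two fields mirror each other, and when a namespace is set dataSource must stay empty. A hedged sketch of that rule using stand-in structs (not the real k8s.io/kubernetes/pkg/apis/core package):

package main

import "fmt"

// Simplified stand-ins for the reference types in this diff (illustrative only).
type TypedLocalObjectReference struct {
	APIGroup *string
	Kind     string
	Name     string
}

type TypedObjectReference struct {
	APIGroup  *string
	Kind      string
	Name      string
	Namespace *string
}

// mirrorDataSource sketches the documented behavior: copy dataSourceRef back
// into dataSource only when no namespace is specified.
func mirrorDataSource(ref *TypedObjectReference) *TypedLocalObjectReference {
	if ref == nil || ref.Namespace != nil {
		return nil // namespace set (or no ref): dataSource stays empty
	}
	return &TypedLocalObjectReference{APIGroup: ref.APIGroup, Kind: ref.Kind, Name: ref.Name}
}

func main() {
	ns := "other-team"
	local := mirrorDataSource(&TypedObjectReference{Kind: "PersistentVolumeClaim", Name: "src-pvc"})
	cross := mirrorDataSource(&TypedObjectReference{Kind: "PersistentVolumeClaim", Name: "src-pvc", Namespace: &ns})
	fmt.Println(local != nil, cross != nil) // true false
}
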
@@ -1977,10 +2002,10 @@ type EnvFromSource struct {
 	// +optional
 	Prefix string
 	// The ConfigMap to select from.
-	//+optional
+	// +optional
 	ConfigMapRef *ConfigMapEnvSource
 	// The Secret to select from.
-	//+optional
+	// +optional
 	SecretRef *SecretEnvSource
 }
 
@@ -2160,6 +2185,25 @@ type ResourceRequirements struct {
 	// otherwise to an implementation-defined value
 	// +optional
 	Requests ResourceList
+	// Claims lists the names of resources, defined in spec.resourceClaims,
+	// that are used by this container.
+	//
+	// This is an alpha field and requires enabling the
+	// DynamicResourceAllocation feature gate.
+	//
+	// This field is immutable.
+	//
+	// +featureGate=DynamicResourceAllocation
+	// +optional
+	Claims []ResourceClaim
 }
 
+// ResourceClaim references one entry in PodSpec.ResourceClaims.
+type ResourceClaim struct {
+	// Name must match the name of one entry in pod.spec.resourceClaims of
+	// the Pod where this field is used. It makes that resource available
+	// inside a container.
+	Name string
+}
+
 // Container represents a single container that is expected to be run on the host.

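The new Claims field lets a container reference, by name, entries declared in pod.spec.resourceClaims (DynamicResourceAllocation, alpha). A short sketch with stand-in structs showing the name-based link the comments describe:

package main

import "fmt"

// Stand-ins mirroring the fields added in this diff (illustrative only).
type ResourceClaim struct{ Name string }

type ResourceRequirements struct {
	Claims []ResourceClaim
}

type PodResourceClaim struct{ Name string }

// claimsAreDeclared checks that every claim named by a container exists in the
// pod-level resourceClaims list, which is what the name reference implies.
func claimsAreDeclared(req ResourceRequirements, podClaims []PodResourceClaim) bool {
	declared := map[string]bool{}
	for _, c := range podClaims {
		declared[c.Name] = true
	}
	for _, c := range req.Claims {
		if !declared[c.Name] {
			return false
		}
	}
	return true
}

func main() {
	podClaims := []PodResourceClaim{{Name: "gpu-claim"}}
	req := ResourceRequirements{Claims: []ResourceClaim{{Name: "gpu-claim"}}}
	fmt.Println(claimsAreDeclared(req, podClaims)) // true
}
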
@@ -2428,12 +2472,14 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+	// PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler
+	// skips scheduling the pod because one or more scheduling gates are still present.
+	PodReasonSchedulingGated = "SchedulingGated"
 	// ContainersReady indicates whether all containers in the pod are ready.
 	ContainersReady PodConditionType = "ContainersReady"
-	// AlphaNoCompatGuaranteeDisruptionTarget indicates the pod is about to be deleted due to a
+	// DisruptionTarget indicates the pod is about to be terminated due to a
 	// disruption (such as preemption, eviction API or garbage-collection).
-	// The constant is to be renamed once the name is accepted within the KEP-3329.
-	AlphaNoCompatGuaranteeDisruptionTarget PodConditionType = "DisruptionTarget"
+	DisruptionTarget PodConditionType = "DisruptionTarget"
 )
 
 // PodCondition represents pod's condition
@@ -2502,7 +2548,7 @@ const (
 // over a set of nodes; that is, it represents the OR of the selectors represented
 // by the node selector terms.
 type NodeSelector struct {
-	//Required. A list of node selector terms. The terms are ORed.
+	// Required. A list of node selector terms. The terms are ORed.
 	NodeSelectorTerms []NodeSelectorTerm
 }
 
@@ -2997,6 +3043,68 @@ type PodSpec struct {
 	// - spec.containers[*].securityContext.runAsGroup
 	// +optional
 	OS *PodOS
+
+	// SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+	// More info: https://git.k8s.io/enhancements/keps/sig-scheduling/3521-pod-scheduling-readiness.
+	//
+	// This is an alpha-level feature enabled by PodSchedulingReadiness feature gate.
+	// +optional
+	SchedulingGates []PodSchedulingGate
+	// ResourceClaims defines which ResourceClaims must be allocated
+	// and reserved before the Pod is allowed to start. The resources
+	// will be made available to those containers which consume them
+	// by name.
+	//
+	// This is an alpha field and requires enabling the
+	// DynamicResourceAllocation feature gate.
+	//
+	// This field is immutable.
+	//
+	// +featureGate=DynamicResourceAllocation
+	// +optional
+	ResourceClaims []PodResourceClaim
 }
 
+// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
+// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+// Containers that need access to the ResourceClaim reference it with this name.
+type PodResourceClaim struct {
+	// Name uniquely identifies this resource claim inside the pod.
+	// This must be a DNS_LABEL.
+	Name string
+
+	// Source describes where to find the ResourceClaim.
+	Source ClaimSource
+}
+
+// ClaimSource describes a reference to a ResourceClaim.
+//
+// Exactly one of these fields should be set. Consumers of this type must
+// treat an empty object as if it has an unknown value.
+type ClaimSource struct {
+	// ResourceClaimName is the name of a ResourceClaim object in the same
+	// namespace as this pod.
+	ResourceClaimName *string
+
+	// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+	// object in the same namespace as this pod.
+	//
+	// The template will be used to create a new ResourceClaim, which will
+	// be bound to this pod. When this pod is deleted, the ResourceClaim
+	// will also be deleted. The name of the ResourceClaim will be <pod
+	// name>-<resource name>, where <resource name> is the
+	// PodResourceClaim.Name. Pod validation will reject the pod if the
+	// concatenated name is not valid for a ResourceClaim (e.g. too long).
+	//
+	// An existing ResourceClaim with that name that is not owned by the
+	// pod will not be used for the pod to avoid using an unrelated
+	// resource by mistake. Scheduling and pod startup are then blocked
+	// until the unrelated ResourceClaim is removed.
+	//
+	// This field is immutable and no changes will be made to the
+	// corresponding ResourceClaim by the control plane after creating the
+	// ResourceClaim.
+	ResourceClaimTemplateName *string
+}
+
 // OSName is the set of OS'es that can be used in OS.
@@ -3017,6 +3125,13 @@ type PodOS struct {
 	Name OSName
 }
 
+// PodSchedulingGate is associated to a Pod to guard its scheduling.
+type PodSchedulingGate struct {
+	// Name of the scheduling gate.
+	// Each scheduling gate must have a unique name field.
+	Name string
+}
+
 // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
 // pod's hosts file.
 type HostAlias struct {

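SchedulingGates block scheduling while any gate is present (PodSchedulingReadiness, alpha), and the PodScheduled condition then reports the SchedulingGated reason added above. A small stand-in sketch of that gating rule (not the real scheduler code):

package main

import "fmt"

// Stand-ins for the new type and constant in this diff (illustrative only).
type PodSchedulingGate struct{ Name string }

const PodReasonSchedulingGated = "SchedulingGated"

// schedulingReadiness mirrors the documented behavior: a pod with one or more
// gates is skipped by the scheduler and its PodScheduled condition carries the
// SchedulingGated reason until all gates are removed.
func schedulingReadiness(gates []PodSchedulingGate) (ready bool, reason string) {
	if len(gates) > 0 {
		return false, PodReasonSchedulingGated
	}
	return true, ""
}

func main() {
	gated := []PodSchedulingGate{{Name: "example.com/quota-approval"}} // hypothetical gate name
	ready, reason := schedulingReadiness(gated)
	fmt.Println(ready, reason) // false SchedulingGated

	ready, _ = schedulingReadiness(nil)
	fmt.Println(ready) // true
}
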
@@ -3131,8 +3246,11 @@ type PodSecurityContext struct {
 	// +optional
 	RunAsNonRoot *bool
 	// A list of groups applied to the first process run in each container, in addition
-	// to the container's primary GID. If unspecified, no groups will be added to
-	// any container.
+	// to the container's primary GID, the fsGroup (if specified), and group memberships
+	// defined in the container image for the uid of the container process. If unspecified,
+	// no additional groups are added to any container. Note that group memberships
+	// defined in the container image for the uid of the container process are still effective,
+	// even if they are not included in this list.
 	// Note that this field cannot be set when spec.os.name is windows.
 	// +optional
 	SupplementalGroups []int64
@@ -3494,7 +3612,7 @@ type ReplicationControllerSpec struct {
 	// insufficient replicas are detected. This reference is ignored if a Template is set.
 	// Must be set before converting to a versioned API object
 	// +optional
-	//TemplateRef *ObjectReference
+	// TemplateRef *ObjectReference
 
 	// Template is the object that describes the pod that will be created if
 	// insufficient replicas are detected. Internally, this takes precedence over a
@@ -3936,7 +4054,6 @@ type ServiceSpec struct {
 	// implementation (e.g. cloud providers) should ignore Services that set this field.
 	// This field can only be set when creating or updating a Service to type 'LoadBalancer'.
 	// Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
-	// +featureGate=LoadBalancerClass
 	// +optional
 	LoadBalancerClass *string
 
@@ -3946,7 +4063,6 @@ type ServiceSpec struct {
 	// dropping the traffic if there are no local endpoints. The default value,
 	// "Cluster", uses the standard behavior of routing to all endpoints evenly
 	// (possibly modified by topology and other features).
-	// +featureGate=ServiceInternalTrafficPolicy
 	// +optional
 	InternalTrafficPolicy *ServiceInternalTrafficPolicyType
 }
@@ -4161,7 +4277,7 @@ type NodeSpec struct {
 	// +optional
 	Taints []Taint
 
-	// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
+	// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.
 	// +optional
 	ConfigSource *NodeConfigSource
 
@@ -4831,7 +4947,7 @@ type ObjectReference struct {
 
 // LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
 type LocalObjectReference struct {
-	//TODO: Add other useful fields. apiVersion, kind, uid?
+	// TODO: Add other useful fields. apiVersion, kind, uid?
 	Name string
 }
 
@@ -5737,7 +5853,7 @@ type TopologySpreadConstraint struct {
 	// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
 	//
 	// If this value is nil, the behavior is equivalent to the Honor policy.
-	// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
 	// +optional
 	NodeAffinityPolicy *NodeInclusionPolicy
 	// NodeTaintsPolicy indicates how we will treat node taints when calculating
@@ -5747,7 +5863,7 @@ type TopologySpreadConstraint struct {
 	// - Ignore: node taints are ignored. All nodes are included.
 	//
 	// If this value is nil, the behavior is equivalent to the Ignore policy.
-	// This is a alpha-level feature enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
 	// +optional
 	NodeTaintsPolicy *NodeInclusionPolicy
 	// MatchLabelKeys is a set of pod label keys to select the pods over which

vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go (generated, vendored): 13 lines changed
@@ -22,8 +22,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/parsers"
 	"k8s.io/utils/pointer"
 )
@@ -130,14 +128,11 @@ func SetDefaults_Service(obj *v1.Service) {
 		obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(features.ServiceInternalTrafficPolicy) {
-		if obj.Spec.InternalTrafficPolicy == nil {
-			if obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer || obj.Spec.Type == v1.ServiceTypeClusterIP {
-				serviceInternalTrafficPolicyCluster := v1.ServiceInternalTrafficPolicyCluster
-				obj.Spec.InternalTrafficPolicy = &serviceInternalTrafficPolicyCluster
-			}
-		}
+	if obj.Spec.InternalTrafficPolicy == nil {
+		if obj.Spec.Type == v1.ServiceTypeNodePort || obj.Spec.Type == v1.ServiceTypeLoadBalancer || obj.Spec.Type == v1.ServiceTypeClusterIP {
+			serviceInternalTrafficPolicyCluster := v1.ServiceInternalTrafficPolicyCluster
+			obj.Spec.InternalTrafficPolicy = &serviceInternalTrafficPolicyCluster
+		}
 	}
 
 	if obj.Spec.Type == v1.ServiceTypeLoadBalancer {

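With the ServiceInternalTrafficPolicy feature-gate check removed above, internalTrafficPolicy is now defaulted to Cluster unconditionally for ClusterIP, NodePort and LoadBalancer services. A stand-in sketch of the resulting defaulting logic (not the real k8s.io/api types):

package main

import "fmt"

// Stand-ins for the relevant service fields (illustrative only).
type ServiceType string

const (
	ServiceTypeClusterIP    ServiceType = "ClusterIP"
	ServiceTypeNodePort     ServiceType = "NodePort"
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
)

type ServiceSpec struct {
	Type                  ServiceType
	InternalTrafficPolicy *string
}

// setDefaultInternalTrafficPolicy mirrors the new code path: no feature-gate
// check, just default to "Cluster" when the field is unset.
func setDefaultInternalTrafficPolicy(spec *ServiceSpec) {
	if spec.InternalTrafficPolicy == nil {
		if spec.Type == ServiceTypeNodePort || spec.Type == ServiceTypeLoadBalancer || spec.Type == ServiceTypeClusterIP {
			cluster := "Cluster"
			spec.InternalTrafficPolicy = &cluster
		}
	}
}

func main() {
	spec := ServiceSpec{Type: ServiceTypeClusterIP}
	setDefaultInternalTrafficPolicy(&spec)
	fmt.Println(*spec.InternalTrafficPolicy) // Cluster
}
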
vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go (generated, vendored): 2 lines changed
@@ -39,7 +39,7 @@ func IsExtendedResourceName(name v1.ResourceName) bool {
 	}
 	// Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name
 	nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name))
-	if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 {
+	if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 {
 		return false
 	}
 	return true

vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go (generated, vendored): 174 lines changed
@@ -192,6 +192,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*v1.ClaimSource)(nil), (*core.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ClaimSource_To_core_ClaimSource(a.(*v1.ClaimSource), b.(*core.ClaimSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ClaimSource)(nil), (*v1.ClaimSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ClaimSource_To_v1_ClaimSource(a.(*core.ClaimSource), b.(*v1.ClaimSource), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*v1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*v1.ClientIPConfig), b.(*core.ClientIPConfig), scope)
 	}); err != nil {
@@ -1352,6 +1362,26 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodResourceClaim)(nil), (*core.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodResourceClaim_To_core_PodResourceClaim(a.(*v1.PodResourceClaim), b.(*core.PodResourceClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodResourceClaim)(nil), (*v1.PodResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodResourceClaim_To_v1_PodResourceClaim(a.(*core.PodResourceClaim), b.(*v1.PodResourceClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodSchedulingGate)(nil), (*core.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(a.(*v1.PodSchedulingGate), b.(*core.PodSchedulingGate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodSchedulingGate)(nil), (*v1.PodSchedulingGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(a.(*core.PodSchedulingGate), b.(*v1.PodSchedulingGate), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope)
 	}); err != nil {
@@ -1562,6 +1592,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceClaim)(nil), (*core.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceClaim_To_core_ResourceClaim(a.(*v1.ResourceClaim), b.(*core.ResourceClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceClaim)(nil), (*v1.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceClaim_To_v1_ResourceClaim(a.(*core.ResourceClaim), b.(*v1.ResourceClaim), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*v1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*v1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope)
 	}); err != nil {
@@ -1967,6 +2007,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*v1.TypedObjectReference)(nil), (*core.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_TypedObjectReference_To_core_TypedObjectReference(a.(*v1.TypedObjectReference), b.(*core.TypedObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.TypedObjectReference)(nil), (*v1.TypedObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_TypedObjectReference_To_v1_TypedObjectReference(a.(*core.TypedObjectReference), b.(*v1.TypedObjectReference), scope)
+	}); err != nil {
+		return err
+	}
 	if err := s.AddGeneratedConversionFunc((*v1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*v1.VolumeDevice), b.(*core.VolumeDevice), scope)
 	}); err != nil {
@@ -2613,6 +2663,28 @@ func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVol
 	return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
 }
 
+func autoConvert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
+	out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
+	out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
+	return nil
+}
+
+// Convert_v1_ClaimSource_To_core_ClaimSource is an autogenerated conversion function.
+func Convert_v1_ClaimSource_To_core_ClaimSource(in *v1.ClaimSource, out *core.ClaimSource, s conversion.Scope) error {
+	return autoConvert_v1_ClaimSource_To_core_ClaimSource(in, out, s)
+}
+
+func autoConvert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
+	out.ResourceClaimName = (*string)(unsafe.Pointer(in.ResourceClaimName))
+	out.ResourceClaimTemplateName = (*string)(unsafe.Pointer(in.ResourceClaimTemplateName))
+	return nil
+}
+
+// Convert_core_ClaimSource_To_v1_ClaimSource is an autogenerated conversion function.
+func Convert_core_ClaimSource_To_v1_ClaimSource(in *core.ClaimSource, out *v1.ClaimSource, s conversion.Scope) error {
+	return autoConvert_core_ClaimSource_To_v1_ClaimSource(in, out, s)
+}
+
 func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
 	out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
 	return nil
@@ -5172,7 +5244,7 @@ func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(
 	out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
 	out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
 	out.DataSource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
-	out.DataSourceRef = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSourceRef))
+	out.DataSourceRef = (*core.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
 	return nil
 }
 
@@ -5191,7 +5263,7 @@ func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(
 	out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName))
 	out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode))
 	out.DataSource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource))
-	out.DataSourceRef = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSourceRef))
+	out.DataSourceRef = (*v1.TypedObjectReference)(unsafe.Pointer(in.DataSourceRef))
 	return nil
 }
 
@@ -6069,6 +6141,52 @@ func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessG
 	return autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in, out, s)
 }
 
+func autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_v1_ClaimSource_To_core_ClaimSource(&in.Source, &out.Source, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_PodResourceClaim_To_core_PodResourceClaim is an autogenerated conversion function.
+func Convert_v1_PodResourceClaim_To_core_PodResourceClaim(in *v1.PodResourceClaim, out *core.PodResourceClaim, s conversion.Scope) error {
+	return autoConvert_v1_PodResourceClaim_To_core_PodResourceClaim(in, out, s)
+}
+
+func autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_core_ClaimSource_To_v1_ClaimSource(&in.Source, &out.Source, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_core_PodResourceClaim_To_v1_PodResourceClaim is an autogenerated conversion function.
+func Convert_core_PodResourceClaim_To_v1_PodResourceClaim(in *core.PodResourceClaim, out *v1.PodResourceClaim, s conversion.Scope) error {
+	return autoConvert_core_PodResourceClaim_To_v1_PodResourceClaim(in, out, s)
+}
+
+func autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
+	out.Name = in.Name
+	return nil
+}
+
+// Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate is an autogenerated conversion function.
+func Convert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in *v1.PodSchedulingGate, out *core.PodSchedulingGate, s conversion.Scope) error {
+	return autoConvert_v1_PodSchedulingGate_To_core_PodSchedulingGate(in, out, s)
+}
+
+func autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *v1.PodSchedulingGate, s conversion.Scope) error {
+	out.Name = in.Name
+	return nil
+}
+
+// Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate is an autogenerated conversion function.
+func Convert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in *core.PodSchedulingGate, out *v1.PodSchedulingGate, s conversion.Scope) error {
+	return autoConvert_core_PodSchedulingGate_To_v1_PodSchedulingGate(in, out, s)
+}
+
 func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error {
 	out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions))
 	out.WindowsOptions = (*core.WindowsSecurityContextOptions)(unsafe.Pointer(in.WindowsOptions))
@@ -6188,6 +6306,8 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s
 	out.SetHostnameAsFQDN = (*bool)(unsafe.Pointer(in.SetHostnameAsFQDN))
 	out.OS = (*core.PodOS)(unsafe.Pointer(in.OS))
 	// INFO: in.HostUsers opted out of conversion generation
+	out.SchedulingGates = *(*[]core.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
+	out.ResourceClaims = *(*[]core.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
 	return nil
 }
 
@@ -6241,6 +6361,8 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s
 	out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks))
 	out.TopologySpreadConstraints = *(*[]v1.TopologySpreadConstraint)(unsafe.Pointer(&in.TopologySpreadConstraints))
 	out.OS = (*v1.PodOS)(unsafe.Pointer(in.OS))
+	out.SchedulingGates = *(*[]v1.PodSchedulingGate)(unsafe.Pointer(&in.SchedulingGates))
+	out.ResourceClaims = *(*[]v1.PodResourceClaim)(unsafe.Pointer(&in.ResourceClaims))
 	return nil
 }
 
@@ -6905,6 +7027,26 @@ func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(
 	return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s)
 }
 
+func autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
+	out.Name = in.Name
+	return nil
+}
+
+// Convert_v1_ResourceClaim_To_core_ResourceClaim is an autogenerated conversion function.
+func Convert_v1_ResourceClaim_To_core_ResourceClaim(in *v1.ResourceClaim, out *core.ResourceClaim, s conversion.Scope) error {
+	return autoConvert_v1_ResourceClaim_To_core_ResourceClaim(in, out, s)
+}
+
+func autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
+	out.Name = in.Name
+	return nil
+}
+
+// Convert_core_ResourceClaim_To_v1_ResourceClaim is an autogenerated conversion function.
+func Convert_core_ResourceClaim_To_v1_ResourceClaim(in *core.ResourceClaim, out *v1.ResourceClaim, s conversion.Scope) error {
+	return autoConvert_core_ResourceClaim_To_v1_ResourceClaim(in, out, s)
+}
+
 func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error {
 	out.ContainerName = in.ContainerName
 	out.Resource = in.Resource
@@ -7032,6 +7174,7 @@ func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.Resourc
 func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error {
 	out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits))
 	out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests))
+	out.Claims = *(*[]core.ResourceClaim)(unsafe.Pointer(&in.Claims))
 	return nil
 }
 
@@ -7043,6 +7186,7 @@ func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.Resourc
 func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error {
 	out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits))
 	out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests))
+	out.Claims = *(*[]v1.ResourceClaim)(unsafe.Pointer(&in.Claims))
 	return nil
 }
 
@@ -8051,6 +8195,32 @@ func Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *
 	return autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in, out, s)
 }
 
+func autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error {
+	out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
+	out.Kind = in.Kind
+	out.Name = in.Name
+	out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
+	return nil
+}
+
+// Convert_v1_TypedObjectReference_To_core_TypedObjectReference is an autogenerated conversion function.
+func Convert_v1_TypedObjectReference_To_core_TypedObjectReference(in *v1.TypedObjectReference, out *core.TypedObjectReference, s conversion.Scope) error {
+	return autoConvert_v1_TypedObjectReference_To_core_TypedObjectReference(in, out, s)
+}
+
+func autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *v1.TypedObjectReference, s conversion.Scope) error {
+	out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup))
+	out.Kind = in.Kind
+	out.Name = in.Name
+	out.Namespace = (*string)(unsafe.Pointer(in.Namespace))
+	return nil
+}
+
+// Convert_core_TypedObjectReference_To_v1_TypedObjectReference is an autogenerated conversion function.
+func Convert_core_TypedObjectReference_To_v1_TypedObjectReference(in *core.TypedObjectReference, out *v1.TypedObjectReference, s conversion.Scope) error {
+	return autoConvert_core_TypedObjectReference_To_v1_TypedObjectReference(in, out, s)
+}
+
 func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error {
 	out.Name = in.Name
 	if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil {

vendor/k8s.io/kubernetes/pkg/apis/core/validation/OWNERS (generated, vendored): 1 line changed
@@ -20,4 +20,3 @@ reviewers:
 - soltysh
 - jsafrane
 - dims
-- fejta

vendor/k8s.io/kubernetes/pkg/apis/core/validation/conditional_validation.go (generated, vendored): 61 lines changed (file deleted)
@@ -1,61 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package validation
-
-import (
-	"k8s.io/apimachinery/pkg/util/validation/field"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/features"
-)
-
-// ValidateConditionalService validates conditionally valid fields.
-func ValidateConditionalService(service, oldService *api.Service) field.ErrorList {
-	var errs field.ErrorList
-
-	errs = append(errs, validateMixedProtocolLBService(service, oldService)...)
-
-	return errs
-}
-
-// validateMixedProtocolLBService checks if the old Service has type=LoadBalancer and whether the Service has different Protocols
-// on its ports. If the MixedProtocolLBService feature flag is disabled the usage of different Protocols in the new Service is
-// valid only if the old Service has different Protocols, too.
-func validateMixedProtocolLBService(service, oldService *api.Service) (errs field.ErrorList) {
-	if service.Spec.Type != api.ServiceTypeLoadBalancer {
-		return
-	}
-	if utilfeature.DefaultFeatureGate.Enabled(features.MixedProtocolLBService) {
-		return
-	}
-
-	if serviceHasMixedProtocols(service) && !serviceHasMixedProtocols(oldService) {
-		errs = append(errs, field.Invalid(field.NewPath("spec", "ports"), service.Spec.Ports, "may not contain more than 1 protocol when type is 'LoadBalancer'"))
-	}
-	return
-}
-
-func serviceHasMixedProtocols(service *api.Service) bool {
-	if service == nil {
-		return false
-	}
-	protos := map[string]bool{}
-	for _, port := range service.Spec.Ports {
-		protos[string(port.Protocol)] = true
-	}
-	return len(protos) > 1
-}

vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go (generated, vendored): 353 lines changed
@@ -309,7 +309,7 @@ func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList
 // validateOverhead can be used to check whether the given Overhead is valid.
 func validateOverhead(overhead core.ResourceList, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
 	// reuse the ResourceRequirements validation logic
-	return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, fldPath, opts)
+	return ValidateResourceRequirements(&core.ResourceRequirements{Limits: overhead}, nil, fldPath, opts)
 }
 
 // Validates that given value is not negative.
@@ -1013,7 +1013,7 @@ func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistent
 func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList {
 	allErrs := field.ErrorList{}
 	if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 {
-		//TODO: consider adding a RequiredOneOf() error for this and similar cases
+		// TODO: consider adding a RequiredOneOf() error for this and similar cases
 		allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required"))
 	}
 	if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 {
@@ -1621,12 +1621,12 @@ func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPat
 // ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
 // Checking of the object data is very minimal because only labels and annotations are used.
 func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
-	allErrs := validatePersistentVolumeClaimTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
+	allErrs := ValidateTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
 	allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"), opts)...)
 	return allErrs
 }
 
-func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
+func ValidateTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
 	allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
 	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
 	// All other fields are not supported and thus must not be set
@@ -1634,11 +1634,11 @@ func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta,
 	// but then adding a new one to ObjectMeta wouldn't be checked
 	// unless this code gets updated. Instead, we ensure that
 	// only allowed fields are set via reflection.
-	allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedPVCTemplateObjectMetaFields, "cannot be set for an ephemeral volume", fldPath)...)
+	allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedTemplateObjectMetaFields, "cannot be set", fldPath)...)
 	return allErrs
 }
 
-var allowedPVCTemplateObjectMetaFields = map[string]bool{
+var allowedTemplateObjectMetaFields = map[string]bool{
 	"Annotations": true,
 	"Labels": true,
 }
@@ -2029,6 +2029,8 @@ type PersistentVolumeClaimSpecValidationOptions struct {
 	EnableRecoverFromExpansionFailure bool
 	// Allow assigning StorageClass to unbound PVCs retroactively
 	EnableRetroactiveDefaultStorageClass bool
+	// Allow to validate the label value of the label selector
+	AllowInvalidLabelValueInSelector bool
 }
 
 func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolumeClaim) PersistentVolumeClaimSpecValidationOptions {
@@ -2036,11 +2038,19 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum
 		AllowReadWriteOncePod:                utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
 		EnableRecoverFromExpansionFailure:    utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure),
 		EnableRetroactiveDefaultStorageClass: utilfeature.DefaultFeatureGate.Enabled(features.RetroactiveDefaultStorageClass),
+		AllowInvalidLabelValueInSelector:     false,
 	}
 	if oldPvc == nil {
 		// If there's no old PVC, use the options based solely on feature enablement
 		return opts
 	}
+	labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
+		AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
+	}
+	if len(unversionedvalidation.ValidateLabelSelector(oldPvc.Spec.Selector, labelSelectorValidationOpts, nil)) > 0 {
+		// If the old object had an invalid label selector, continue to allow it in the new object
+		opts.AllowInvalidLabelValueInSelector = true
+	}
 
 	if helper.ContainsAccessMode(oldPvc.Spec.AccessModes, core.ReadWriteOncePod) {
 		// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
@@ -2051,12 +2061,20 @@ func ValidationOptionsForPersistentVolumeClaim(pvc, oldPvc *core.PersistentVolum
 
 func ValidationOptionsForPersistentVolumeClaimTemplate(claimTemplate, oldClaimTemplate *core.PersistentVolumeClaimTemplate) PersistentVolumeClaimSpecValidationOptions {
 	opts := PersistentVolumeClaimSpecValidationOptions{
-		AllowReadWriteOncePod: utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
+		AllowReadWriteOncePod:            utilfeature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
+		AllowInvalidLabelValueInSelector: false,
 	}
 	if oldClaimTemplate == nil {
 		// If there's no old PVC template, use the options based solely on feature enablement
 		return opts
 	}
+	labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
+		AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
+	}
+	if len(unversionedvalidation.ValidateLabelSelector(oldClaimTemplate.Spec.Selector, labelSelectorValidationOpts, nil)) > 0 {
+		// If the old object had an invalid label selector, continue to allow it in the new object
+		opts.AllowInvalidLabelValueInSelector = true
+	}
 	if helper.ContainsAccessMode(oldClaimTemplate.Spec.AccessModes, core.ReadWriteOncePod) {
 		// If the old object allowed "ReadWriteOncePod", continue to allow it in the new object
 		opts.AllowReadWriteOncePod = true

@@ -2086,7 +2104,34 @@ func validateDataSource(dataSource *core.TypedLocalObjectReference, fldPath *fie
 		apiGroup = *dataSource.APIGroup
 	}
 	if len(apiGroup) == 0 && dataSource.Kind != "PersistentVolumeClaim" {
-		allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, ""))
+		allErrs = append(allErrs, field.Invalid(fldPath, dataSource.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup"))
+	}
+
+	return allErrs
+}
+
+// validateDataSourceRef validates a DataSourceRef in a PersistentVolumeClaimSpec
+func validateDataSourceRef(dataSourceRef *core.TypedObjectReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(dataSourceRef.Name) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+	}
+	if len(dataSourceRef.Kind) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
+	}
+	apiGroup := ""
+	if dataSourceRef.APIGroup != nil {
+		apiGroup = *dataSourceRef.APIGroup
+	}
+	if len(apiGroup) == 0 && dataSourceRef.Kind != "PersistentVolumeClaim" {
+		allErrs = append(allErrs, field.Invalid(fldPath, dataSourceRef.Kind, "must be 'PersistentVolumeClaim' when referencing the default apiGroup"))
+	}
+
+	if dataSourceRef.Namespace != nil && len(*dataSourceRef.Namespace) > 0 {
+		for _, msg := range ValidateNameFunc(ValidateNamespaceName)(*dataSourceRef.Namespace, false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), *dataSourceRef.Namespace, msg))
+		}
+	}
+
 	return allErrs
 }
@@ -2099,7 +2144,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
 		allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required"))
 	}
 	if spec.Selector != nil {
-		allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...)
+		labelSelectorValidationOpts := unversionedvalidation.LabelSelectorValidationOptions{
+			AllowInvalidLabelValueInSelector: opts.AllowInvalidLabelValueInSelector,
+		}
+		allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, labelSelectorValidationOpts, fldPath.Child("selector"))...)
 	}
 
 	expandedSupportedAccessModes := sets.StringKeySet(supportedAccessModes)
@@ -2145,10 +2193,15 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
 		allErrs = append(allErrs, validateDataSource(spec.DataSource, fldPath.Child("dataSource"))...)
 	}
 	if spec.DataSourceRef != nil {
-		allErrs = append(allErrs, validateDataSource(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...)
+		allErrs = append(allErrs, validateDataSourceRef(spec.DataSourceRef, fldPath.Child("dataSourceRef"))...)
 	}
-	if spec.DataSource != nil && spec.DataSourceRef != nil {
-		if !apiequality.Semantic.DeepEqual(spec.DataSource, spec.DataSourceRef) {
+	if spec.DataSourceRef != nil && spec.DataSourceRef.Namespace != nil && len(*spec.DataSourceRef.Namespace) > 0 {
+		if spec.DataSource != nil {
+			allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
+				"may not be specified when dataSourceRef.namespace is specified"))
+		}
+	} else if spec.DataSource != nil && spec.DataSourceRef != nil {
+		if !isDataSourceEqualDataSourceRef(spec.DataSource, spec.DataSourceRef) {
 			allErrs = append(allErrs, field.Invalid(fldPath, fldPath.Child("dataSource"),
 				"must match dataSourceRef"))
 		}
@@ -2157,6 +2210,10 @@ func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fld
 	return allErrs
 }
 
+func isDataSourceEqualDataSourceRef(dataSource *core.TypedLocalObjectReference, dataSourceRef *core.TypedObjectReference) bool {
+	return reflect.DeepEqual(dataSource.APIGroup, dataSourceRef.APIGroup) && dataSource.Kind == dataSourceRef.Kind && dataSource.Name == dataSourceRef.Name
+}
+
 // ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim
 func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim, opts PersistentVolumeClaimSpecValidationOptions) field.ErrorList {
 	allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))

@ -2711,6 +2768,54 @@ func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]str
return allErrs
}

func validatePodResourceClaims(claims []core.PodResourceClaim, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
podClaimNames := sets.NewString()
for i, claim := range claims {
allErrs = append(allErrs, validatePodResourceClaim(claim, &podClaimNames, fldPath.Index(i))...)
}
return allErrs
}

// gatherPodResourceClaimNames returns a set of all non-empty
// PodResourceClaim.Name values. Validation that those names are valid is
// handled by validatePodResourceClaims.
func gatherPodResourceClaimNames(claims []core.PodResourceClaim) sets.String {
podClaimNames := sets.String{}
for _, claim := range claims {
if claim.Name != "" {
podClaimNames.Insert(claim.Name)
}
}
return podClaimNames
}

func validatePodResourceClaim(claim core.PodResourceClaim, podClaimNames *sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claim.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if podClaimNames.Has(claim.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), claim.Name))
} else {
allErrs = append(allErrs, ValidateDNS1123Label(claim.Name, fldPath.Child("name"))...)
podClaimNames.Insert(claim.Name)
}
allErrs = append(allErrs, validatePodResourceClaimSource(claim.Source, fldPath.Child("source"))...)

return allErrs
}

func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if claimSource.ResourceClaimName != nil && claimSource.ResourceClaimTemplateName != nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "at most one of `resourceClaimName` or `resourceClaimTemplateName` may be specified"))
}
if claimSource.ResourceClaimName == nil && claimSource.ResourceClaimTemplateName == nil {
allErrs = append(allErrs, field.Invalid(fldPath, claimSource, "must specify one of: `resourceClaimName`, `resourceClaimTemplateName`"))
}
return allErrs
}
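As a hedged sketch of the shape being validated here (not part of the vendored diff): a pod-level resourceClaims list using the external k8s.io/api/core/v1 types that mirror core.PodResourceClaim and core.ClaimSource. The claim names "gpu" and "scratch" and the referenced objects are invented; each ClaimSource sets exactly one of ResourceClaimName / ResourceClaimTemplateName, which is what validatePodResourceClaimSource enforces.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	existingClaim := "shared-gpu-claim"  // assumed pre-created ResourceClaim in the pod's namespace
	template := "scratch-claim-template" // assumed ResourceClaimTemplate instantiated per pod

	spec := corev1.PodSpec{
		ResourceClaims: []corev1.PodResourceClaim{
			// References an existing ResourceClaim object directly.
			{Name: "gpu", Source: corev1.ClaimSource{ResourceClaimName: &existingClaim}},
			// Asks Kubernetes to create a claim from a template for this pod.
			{Name: "scratch", Source: corev1.ClaimSource{ResourceClaimTemplateName: &template}},
		},
	}
	for _, c := range spec.ResourceClaims {
		fmt.Println("pod-level claim:", c.Name)
	}
}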
func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
@ -2933,8 +3038,8 @@ func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.Error
|
||||
|
||||
// validateEphemeralContainers is called by pod spec and template validation to validate the list of ephemeral containers.
|
||||
// Note that this is called for pod template even though ephemeral containers aren't allowed in pod templates.
|
||||
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer, containers, initContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
if len(ephemeralContainers) == 0 {
|
||||
return allErrs
|
||||
@ -2954,7 +3059,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
|
||||
idxPath := fldPath.Index(i)
|
||||
|
||||
c := (*core.Container)(&ec.EphemeralContainerCommon)
|
||||
allErrs = append(allErrs, validateContainerCommon(c, volumes, idxPath, opts)...)
|
||||
allErrs = append(allErrs, validateContainerCommon(c, volumes, podClaimNames, idxPath, opts)...)
|
||||
// Ephemeral containers don't need looser constraints for pod templates, so it's convenient to apply both validations
|
||||
// here where we've already converted EphemeralContainerCommon to Container.
|
||||
allErrs = append(allErrs, validateContainerOnlyForPod(c, idxPath)...)
|
||||
@ -2992,7 +3097,7 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateFieldAcceptList checks that only allowed fields are set.
|
||||
// ValidateFieldAcceptList checks that only allowed fields are set.
|
||||
// The value must be a struct (not a pointer to a struct!).
|
||||
func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
@ -3016,7 +3121,7 @@ func validateFieldAllowList(value interface{}, allowedFields map[string]bool, er
|
||||
}
|
||||
|
||||
// validateInitContainers is called by pod spec and template validation to validate the list of init containers
|
||||
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
func validateInitContainers(containers []core.Container, regularContainers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
allNames := sets.String{}
|
||||
@ -3027,7 +3132,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
|
||||
idxPath := fldPath.Index(i)
|
||||
|
||||
// Apply the validation common to all container types
|
||||
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, idxPath, opts)...)
|
||||
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, idxPath, opts)...)
|
||||
|
||||
// Names must be unique within regular and init containers. Collisions with ephemeral containers
|
||||
// will be detected by validateEphemeralContainers().
|
||||
@ -3060,8 +3165,8 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
|
||||
|
||||
// validateContainerCommon applies validation common to all container types. It's called by regular, init, and ephemeral
|
||||
// container list validation to require a properly formatted name, image, etc.
|
||||
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, path *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
func validateContainerCommon(ctr *core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, path *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
|
||||
namePath := path.Child("name")
|
||||
if len(ctr.Name) == 0 {
|
||||
@ -3097,7 +3202,7 @@ func validateContainerCommon(ctr *core.Container, volumes map[string]core.Volume
|
||||
allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, ctr, path.Child("volumeMounts"))...)
|
||||
allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, path.Child("volumeDevices"))...)
|
||||
allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, path.Child("imagePullPolicy"))...)
|
||||
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, path.Child("resources"), opts)...)
|
||||
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, podClaimNames, path.Child("resources"), opts)...)
|
||||
allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, path.Child("securityContext"))...)
|
||||
return allErrs
|
||||
}
|
||||
@ -3150,7 +3255,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList
|
||||
}
|
||||
|
||||
// validateContainers is called by pod spec and template validation to validate the list of regular containers.
|
||||
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if len(containers) == 0 {
|
||||
@ -3162,7 +3267,7 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol
|
||||
path := fldPath.Index(i)
|
||||
|
||||
// Apply validation common to all containers
|
||||
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, path, opts)...)
|
||||
allErrs = append(allErrs, validateContainerCommon(&ctr, volumes, podClaimNames, path, opts)...)
|
||||
|
||||
// Container names must be unique within the list of regular containers.
|
||||
// Collisions with init or ephemeral container names will be detected by the init or ephemeral
|
||||
@ -3272,6 +3377,22 @@ func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *fie
return allErrs
}

func validateSchedulingGates(schedulingGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
// There should be no duplicates in the list of scheduling gates.
seen := sets.String{}
for i, schedulingGate := range schedulingGates {
if schedulingGate.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), "must not be empty"))
}
if seen.Has(schedulingGate.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), schedulingGate.Name))
}
seen.Insert(schedulingGate.Name)
}
return allErrs
}
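For illustration (not part of the vendored sources): a gated pod written with the external corev1 types; the gate names are examples only. validateSchedulingGates above requires each name to be non-empty and unique, and validateOnlyDeletedSchedulingGates further below only lets updates remove entries from this list.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "gated-pod"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "app", Image: "registry.k8s.io/pause:3.9"}},
			// The scheduler will not bind this pod until both gates have been removed.
			SchedulingGates: []corev1.PodSchedulingGate{
				{Name: "example.com/quota-check"},
				{Name: "example.com/budget-approval"},
			},
		},
	}
	fmt.Println(len(pod.Spec.SchedulingGates), "scheduling gates set")
}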
func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
@ -3355,7 +3476,7 @@ func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldP
|
||||
}
|
||||
|
||||
// validateAffinity checks if given affinities are valid
|
||||
func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList {
|
||||
func validateAffinity(affinity *core.Affinity, opts PodValidationOptions, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if affinity != nil {
|
||||
@ -3363,10 +3484,10 @@ func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorL
|
||||
allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...)
|
||||
}
|
||||
if affinity.PodAffinity != nil {
|
||||
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...)
|
||||
allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, opts.AllowInvalidLabelValueInSelector, fldPath.Child("podAffinity"))...)
|
||||
}
|
||||
if affinity.PodAntiAffinity != nil {
|
||||
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...)
|
||||
allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, opts.AllowInvalidLabelValueInSelector, fldPath.Child("podAntiAffinity"))...)
|
||||
}
|
||||
}
|
||||
|
||||
@ -3420,6 +3541,28 @@ func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldToleratio
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateOnlyDeletedSchedulingGates(newGates, oldGates []core.PodSchedulingGate, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(newGates) == 0 {
return allErrs
}

additionalGates := make(map[string]int)
for i, newGate := range newGates {
additionalGates[newGate.Name] = i
}

for _, oldGate := range oldGates {
delete(additionalGates, oldGate.Name)
}

for gate, i := range additionalGates {
allErrs = append(allErrs, field.Forbidden(fldPath.Index(i).Child("name"), fmt.Sprintf("only deletion is allowed, but found new scheduling gate '%s'", gate)))
}

return allErrs
}
|
||||
func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for _, hostAlias := range hostAliases {
|
||||
@ -3505,10 +3648,10 @@ type PodValidationOptions struct {
AllowDownwardAPIHugePages bool
// Allow invalid pod-deletion-cost annotation value for backward compatibility.
AllowInvalidPodDeletionCost bool
// Allow invalid label-value in LabelSelector
AllowInvalidLabelValueInSelector bool
// Allow pod spec to use non-integer multiple of huge page unit size
AllowIndivisibleHugePagesValues bool
// Allow hostProcess field to be set in windows security context
AllowWindowsHostProcessField bool
// Allow more DNSSearchPaths and longer DNSSearchListChars
AllowExpandedDNSConfig bool
}
@ -3579,7 +3722,7 @@ func validatePodIPs(pod *core.Pod) field.ErrorList {
|
||||
}
|
||||
|
||||
// There should be no duplicates in list of Pod.PodIPs
|
||||
seen := sets.String{} //:= make(map[string]int)
|
||||
seen := sets.String{} // := make(map[string]int)
|
||||
for i, podIP := range pod.Status.PodIPs {
|
||||
if seen.Has(podIP.IP) {
|
||||
allErrs = append(allErrs, field.Duplicate(podIPsField.Index(i), podIP))
|
||||
@ -3602,19 +3745,22 @@ func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *fi
|
||||
|
||||
vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"), opts)
|
||||
allErrs = append(allErrs, vErrs...)
|
||||
allErrs = append(allErrs, validateContainers(spec.Containers, vols, fldPath.Child("containers"), opts)...)
|
||||
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"), opts)...)
|
||||
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, fldPath.Child("ephemeralContainers"), opts)...)
|
||||
podClaimNames := gatherPodResourceClaimNames(spec.ResourceClaims)
|
||||
allErrs = append(allErrs, validatePodResourceClaims(spec.ResourceClaims, fldPath.Child("resourceClaims"))...)
|
||||
allErrs = append(allErrs, validateContainers(spec.Containers, vols, podClaimNames, fldPath.Child("containers"), opts)...)
|
||||
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, podClaimNames, fldPath.Child("initContainers"), opts)...)
|
||||
allErrs = append(allErrs, validateEphemeralContainers(spec.EphemeralContainers, spec.Containers, spec.InitContainers, vols, podClaimNames, fldPath.Child("ephemeralContainers"), opts)...)
|
||||
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
|
||||
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
|
||||
allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"), opts)...)
|
||||
allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
|
||||
allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...)
|
||||
allErrs = append(allErrs, validateAffinity(spec.Affinity, opts, fldPath.Child("affinity"))...)
|
||||
allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"), opts)...)
|
||||
allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...)
|
||||
allErrs = append(allErrs, validateSchedulingGates(spec.SchedulingGates, fldPath.Child("schedulingGates"))...)
|
||||
allErrs = append(allErrs, validateTopologySpreadConstraints(spec.TopologySpreadConstraints, fldPath.Child("topologySpreadConstraints"))...)
|
||||
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath, opts)...)
|
||||
allErrs = append(allErrs, validateWindowsHostProcessPod(spec, fldPath)...)
|
||||
allErrs = append(allErrs, validateHostUsers(spec, fldPath)...)
|
||||
if len(spec.ServiceAccountName) > 0 {
|
||||
for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
|
||||
@ -3973,12 +4119,10 @@ func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldP
|
||||
}
|
||||
|
||||
// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data
|
||||
func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
|
||||
func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("labelSelector"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, fldPath.Child("namespaceSelector"))...)
|
||||
|
||||
allErrs = append(allErrs, ValidatePodAffinityTermSelector(podAffinityTerm, allowInvalidLabelValueInSelector, fldPath)...)
|
||||
for _, name := range podAffinityTerm.Namespaces {
|
||||
for _, msg := range ValidateNamespaceName(name, false) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
|
||||
@ -3991,40 +4135,40 @@ func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *fiel
|
||||
}
|
||||
|
||||
// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data
|
||||
func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList {
|
||||
func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for i, podAffinityTerm := range podAffinityTerms {
|
||||
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...)
|
||||
allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowInvalidLabelValueInSelector, fldPath.Index(i))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data
|
||||
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList {
|
||||
func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for j, weightedTerm := range weightedPodAffinityTerms {
|
||||
if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100"))
|
||||
}
|
||||
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...)
|
||||
allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowInvalidLabelValueInSelector, fldPath.Index(j).Child("podAffinityTerm"))...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data
|
||||
func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList {
|
||||
func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
|
||||
// if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
|
||||
// allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
|
||||
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
|
||||
//}
|
||||
// }
|
||||
if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
|
||||
allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
|
||||
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
|
||||
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
|
||||
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
return allErrs
|
||||
@ -4047,19 +4191,19 @@ func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.Erro
|
||||
}
|
||||
|
||||
// validatePodAffinity tests that the specified podAffinity fields have valid data
|
||||
func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList {
|
||||
func validatePodAffinity(podAffinity *core.PodAffinity, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented.
|
||||
// if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
|
||||
// allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false,
|
||||
// fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...)
|
||||
//}
|
||||
// }
|
||||
if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
|
||||
allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
|
||||
fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
|
||||
allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, allowInvalidLabelValueInSelector,
|
||||
fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...)
|
||||
}
|
||||
return allErrs
|
||||
@ -4260,7 +4404,7 @@ func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *
|
||||
func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) {
|
||||
allErrs = field.ErrorList{}
|
||||
if len(newContainers) != len(oldContainers) {
|
||||
//TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
|
||||
// TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers"))
|
||||
return allErrs, true
|
||||
}
|
||||
@ -4287,6 +4431,11 @@ func ValidatePodCreate(pod *core.Pod, opts PodValidationOptions) field.ErrorList
|
||||
if len(pod.Spec.EphemeralContainers) > 0 {
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeralContainers"), "cannot be set on create"))
|
||||
}
|
||||
// A Pod cannot be assigned a Node if there are remaining scheduling gates.
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness) &&
|
||||
pod.Spec.NodeName != "" && len(pod.Spec.SchedulingGates) != 0 {
|
||||
allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "cannot be set until all schedulingGates have been cleared"))
|
||||
}
|
||||
allErrs = append(allErrs, validateSeccompAnnotationsAndFields(pod.ObjectMeta, &pod.Spec, fldPath)...)
|
||||
|
||||
return allErrs
|
||||
@ -4372,6 +4521,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
|
||||
// 2. spec.initContainers[*].image
|
||||
// 3. spec.activeDeadlineSeconds
|
||||
// 4. spec.terminationGracePeriodSeconds
|
||||
// 5. spec.schedulingGates
|
||||
|
||||
containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers"))
|
||||
allErrs = append(allErrs, containerErrs...)
|
||||
@ -4407,6 +4557,9 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
|
||||
// Allow only additions to tolerations updates.
|
||||
allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...)
|
||||
|
||||
// Allow only deletions to schedulingGates updates.
|
||||
allErrs = append(allErrs, validateOnlyDeletedSchedulingGates(newPod.Spec.SchedulingGates, oldPod.Spec.SchedulingGates, specPath.Child("schedulingGates"))...)
|
||||
|
||||
// the last thing to check is pod spec equality. If the pod specs are equal, then we can simply return the errors we have
|
||||
// so far and save the cost of a deep copy.
|
||||
if apiequality.Semantic.DeepEqual(newPod.Spec, oldPod.Spec) {
|
||||
@ -4435,6 +4588,8 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
|
||||
activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds
|
||||
mungedPodSpec.ActiveDeadlineSeconds = &activeDeadlineSeconds
|
||||
}
|
||||
// munge spec.schedulingGates
|
||||
mungedPodSpec.SchedulingGates = oldPod.Spec.SchedulingGates // +k8s:verify-mutation:reason=clone
|
||||
// tolerations are checked before the deep copy, so munge those too
|
||||
mungedPodSpec.Tolerations = oldPod.Spec.Tolerations // +k8s:verify-mutation:reason=clone
|
||||
|
||||
@ -4446,7 +4601,7 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
|
||||
|
||||
if !apiequality.Semantic.DeepEqual(mungedPodSpec, oldPod.Spec) {
|
||||
// This diff isn't perfect, but it's a helluva lot better an "I'm not going to tell you what the difference is".
|
||||
//TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
|
||||
// TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff
|
||||
specDiff := cmp.Diff(oldPod.Spec, mungedPodSpec)
|
||||
allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds`, `spec.tolerations` (only additions to existing tolerations) or `spec.terminationGracePeriodSeconds` (allow it to be set to 1 if it was previously negative)\n%v", specDiff)))
|
||||
}
|
||||
@ -4878,14 +5033,12 @@ func validateServiceExternalTrafficFieldsUpdate(before, after *core.Service) fie
|
||||
func validateServiceInternalTrafficFieldsValue(service *core.Service) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ServiceInternalTrafficPolicy) {
|
||||
if service.Spec.InternalTrafficPolicy == nil {
|
||||
// We do not forbid internalTrafficPolicy on other Service types because of historical reasons.
|
||||
// We did not check that before it went beta and we don't want to invalidate existing stored objects.
|
||||
if service.Spec.Type == core.ServiceTypeNodePort ||
|
||||
service.Spec.Type == core.ServiceTypeLoadBalancer || service.Spec.Type == core.ServiceTypeClusterIP {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("internalTrafficPolicy"), ""))
|
||||
}
|
||||
if service.Spec.InternalTrafficPolicy == nil {
|
||||
// We do not forbid internalTrafficPolicy on other Service types because of historical reasons.
|
||||
// We did not check that before it went beta and we don't want to invalidate existing stored objects.
|
||||
if service.Spec.Type == core.ServiceTypeNodePort ||
|
||||
service.Spec.Type == core.ServiceTypeLoadBalancer || service.Spec.Type == core.ServiceTypeClusterIP {
|
||||
allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("internalTrafficPolicy"), ""))
|
||||
}
|
||||
}
|
||||
|
||||
@ -5259,6 +5412,8 @@ func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList {
|
||||
}
|
||||
|
||||
// validation specific to Node.Spec.ConfigSource
|
||||
// The field ConfigSource is deprecated and will not be used. The validation is kept in place
|
||||
// for the backward compatibility
|
||||
func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
count := int(0)
|
||||
@ -5276,6 +5431,8 @@ func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.
|
||||
}
|
||||
|
||||
// validation specific to Node.Spec.ConfigSource.ConfigMap
|
||||
// The field ConfigSource is deprecated and will not be used. The validation is kept in place
|
||||
// for the backward compatibility
|
||||
func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
// uid and resourceVersion must not be set in spec
|
||||
@ -5749,7 +5906,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel
|
||||
}
|
||||
|
||||
// Validates resource requirement spec.
func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
func ValidateResourceRequirements(requirements *core.ResourceRequirements, podClaimNames sets.String, fldPath *field.Path, opts PodValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
limPath := fldPath.Child("limits")
reqPath := fldPath.Child("requests")
@ -5812,6 +5969,42 @@ func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPa
allErrs = append(allErrs, field.Forbidden(fldPath, "HugePages require cpu or memory"))
}

allErrs = append(allErrs, validateResourceClaimNames(requirements.Claims, podClaimNames, fldPath.Child("claims"))...)

return allErrs
}

// validateResourceClaimNames checks that the names in
// ResourceRequirements.Claims have a corresponding entry in
// PodSpec.ResourceClaims.
func validateResourceClaimNames(claims []core.ResourceClaim, podClaimNames sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
names := sets.String{}
for i, claim := range claims {
name := claim.Name
if name == "" {
allErrs = append(allErrs, field.Required(fldPath.Index(i), ""))
} else {
if names.Has(name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), name))
} else {
names.Insert(name)
}
if !podClaimNames.Has(name) {
// field.NotFound doesn't accept an
// explanation. Adding one here is more
// user-friendly.
error := field.NotFound(fldPath.Index(i), name)
error.Detail = "must be one of the names in pod.spec.resourceClaims"
if len(podClaimNames) == 0 {
error.Detail += " which is empty"
} else {
error.Detail += ": " + strings.Join(podClaimNames.List(), ", ")
}
allErrs = append(allErrs, error)
}
}
}
return allErrs
}
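A hedged sketch of the container-to-pod claim wiring that validateResourceClaimNames checks, again using the external corev1 mirror types; the claim name "gpu" and the image are invented and must appear in pod.spec.resourceClaims or the NotFound error above is returned.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	claimName := "existing-gpu-claim" // assumed pre-existing ResourceClaim object

	spec := corev1.PodSpec{
		ResourceClaims: []corev1.PodResourceClaim{
			{Name: "gpu", Source: corev1.ClaimSource{ResourceClaimName: &claimName}},
		},
		Containers: []corev1.Container{{
			Name:  "trainer",
			Image: "example.com/trainer:latest", // placeholder image
			Resources: corev1.ResourceRequirements{
				// Each entry here must match a name from spec.resourceClaims.
				Claims: []corev1.ResourceClaim{{Name: "gpu"}},
			},
		}},
	}
	fmt.Println("container claim:", spec.Containers[0].Resources.Claims[0].Name)
}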
@ -6119,7 +6312,7 @@ func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path)
|
||||
|
||||
// EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports.
|
||||
if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 {
|
||||
//TODO: consider adding a RequiredOneOf() error for this and similar cases
|
||||
// TODO: consider adding a RequiredOneOf() error for this and similar cases
|
||||
allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`"))
|
||||
}
|
||||
for addr := range ss.Addresses {
|
||||
@ -6208,7 +6401,7 @@ func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *fi
|
||||
// ValidateSecurityContext ensures the security context contains valid settings
|
||||
func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
//this should only be true for testing since SecurityContext is defaulted by the core
|
||||
// this should only be true for testing since SecurityContext is defaulted by the core
|
||||
if sc == nil {
|
||||
return allErrs
|
||||
}
|
||||
@ -6372,7 +6565,7 @@ func validateWindowsSecurityContextOptions(windowsOptions *core.WindowsSecurityC
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path, opts PodValidationOptions) field.ErrorList {
|
||||
func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
// Keep track of container and hostProcess container count for validate
|
||||
@ -6384,13 +6577,6 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
|
||||
podHostProcess = podSpec.SecurityContext.WindowsOptions.HostProcess
|
||||
}
|
||||
|
||||
if !opts.AllowWindowsHostProcessField && podHostProcess != nil {
|
||||
// Do not allow pods to persist data that sets hostProcess (true or false)
|
||||
errMsg := "not allowed when feature gate 'WindowsHostProcessContainers' is not enabled"
|
||||
allErrs = append(allErrs, field.Forbidden(fieldPath.Child("securityContext", "windowsOptions", "hostProcess"), errMsg))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
hostNetwork := false
|
||||
if podSpec.SecurityContext != nil {
|
||||
hostNetwork = podSpec.SecurityContext.HostNetwork
|
||||
@ -6404,12 +6590,6 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
|
||||
containerHostProcess = c.SecurityContext.WindowsOptions.HostProcess
|
||||
}
|
||||
|
||||
if !opts.AllowWindowsHostProcessField && containerHostProcess != nil {
|
||||
// Do not allow pods to persist data that sets hostProcess (true or false)
|
||||
errMsg := "not allowed when feature gate 'WindowsHostProcessContainers' is not enabled"
|
||||
allErrs = append(allErrs, field.Forbidden(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), errMsg))
|
||||
}
|
||||
|
||||
if podHostProcess != nil && containerHostProcess != nil && *podHostProcess != *containerHostProcess {
|
||||
errMsg := fmt.Sprintf("pod hostProcess value must be identical if both are specified, was %v", *podHostProcess)
|
||||
allErrs = append(allErrs, field.Invalid(cFieldPath.Child("securityContext", "windowsOptions", "hostProcess"), *containerHostProcess, errMsg))
|
||||
@ -6417,7 +6597,7 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
|
||||
|
||||
switch {
|
||||
case containerHostProcess != nil && *containerHostProcess:
|
||||
// Container explitly sets hostProcess=true
|
||||
// Container explicitly sets hostProcess=true
|
||||
hostProcessContainerCount++
|
||||
case containerHostProcess == nil && podHostProcess != nil && *podHostProcess:
|
||||
// Container inherits hostProcess=true from pod settings
|
||||
@ -6428,13 +6608,6 @@ func validateWindowsHostProcessPod(podSpec *core.PodSpec, fieldPath *field.Path,
|
||||
})
|
||||
|
||||
if hostProcessContainerCount > 0 {
|
||||
// Fail Pod validation if feature is not enabled (unless podspec already exists and contains HostProcess fields) instead of dropping fields based on PRR reivew.
|
||||
if !opts.AllowWindowsHostProcessField {
|
||||
errMsg := "pod must not contain Windows hostProcess containers when feature gate 'WindowsHostProcessContainers' is not enabled"
|
||||
allErrs = append(allErrs, field.Forbidden(fieldPath, errMsg))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// At present, if a Windows Pods contains any HostProcess containers than all containers must be
|
||||
// HostProcess containers (explicitly set or inherited).
|
||||
if hostProcessContainerCount != containerCount {
|
||||
@ -6744,7 +6917,7 @@ func ValidateServiceClusterIPsRelatedFields(service *core.Service) field.ErrorLi
|
||||
}
|
||||
|
||||
// IPFamilyPolicy stand alone validation
|
||||
//note: nil is ok, defaulted in alloc check registry/core/service/*
|
||||
// note: nil is ok, defaulted in alloc check registry/core/service/*
|
||||
if service.Spec.IPFamilyPolicy != nil {
|
||||
// must have a supported value
|
||||
if !supportedServiceIPFamilyPolicy.Has(string(*(service.Spec.IPFamilyPolicy))) {
|
||||
@ -6992,3 +7165,11 @@ func sameLoadBalancerClass(oldService, service *core.Service) bool {
|
||||
}
|
||||
return *oldService.Spec.LoadBalancerClass == *service.Spec.LoadBalancerClass
|
||||
}
|
||||
|
||||
func ValidatePodAffinityTermSelector(podAffinityTerm core.PodAffinityTerm, allowInvalidLabelValueInSelector bool, fldPath *field.Path) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
labelSelectorValidationOptions := unversionedvalidation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: allowInvalidLabelValueInSelector}
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, labelSelectorValidationOptions, fldPath.Child("labelSelector"))...)
|
||||
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.NamespaceSelector, labelSelectorValidationOptions, fldPath.Child("namespaceSelector"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
120
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
120
vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
generated
vendored
@ -419,6 +419,32 @@ func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClaimSource) DeepCopyInto(out *ClaimSource) {
|
||||
*out = *in
|
||||
if in.ResourceClaimName != nil {
|
||||
in, out := &in.ResourceClaimName, &out.ResourceClaimName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.ResourceClaimTemplateName != nil {
|
||||
in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource.
|
||||
func (in *ClaimSource) DeepCopy() *ClaimSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ClaimSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
|
||||
*out = *in
|
||||
@ -2965,7 +2991,7 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
|
||||
}
|
||||
if in.DataSourceRef != nil {
|
||||
in, out := &in.DataSourceRef, &out.DataSourceRef
|
||||
*out = new(TypedLocalObjectReference)
|
||||
*out = new(TypedObjectReference)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
@ -3728,6 +3754,39 @@ func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
|
||||
*out = *in
|
||||
in.Source.DeepCopyInto(&out.Source)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim.
|
||||
func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodResourceClaim)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate.
|
||||
func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodSchedulingGate)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
|
||||
*out = *in
|
||||
@ -3961,6 +4020,18 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
|
||||
*out = new(PodOS)
|
||||
**out = **in
|
||||
}
|
||||
if in.SchedulingGates != nil {
|
||||
in, out := &in.SchedulingGates, &out.SchedulingGates
|
||||
*out = make([]PodSchedulingGate, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ResourceClaims != nil {
|
||||
in, out := &in.ResourceClaims, &out.ResourceClaims
|
||||
*out = make([]PodResourceClaim, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -4539,6 +4610,22 @@ func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
|
||||
func (in *ResourceClaim) DeepCopy() *ResourceClaim {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceClaim)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
|
||||
*out = *in
|
||||
@ -4719,6 +4806,11 @@ func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.Claims != nil {
|
||||
in, out := &in.Claims, &out.Claims
|
||||
*out = make([]ResourceClaim, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -5693,6 +5785,32 @@ func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) {
|
||||
*out = *in
|
||||
if in.APIGroup != nil {
|
||||
in, out := &in.APIGroup, &out.APIGroup
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.Namespace != nil {
|
||||
in, out := &in.Namespace, &out.Namespace
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference.
|
||||
func (in *TypedObjectReference) DeepCopy() *TypedObjectReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TypedObjectReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Volume) DeepCopyInto(out *Volume) {
|
||||
*out = *in
|
||||
|
48
vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
generated
vendored
48
vendor/k8s.io/kubernetes/pkg/apis/networking/types.go
generated
vendored
@ -158,15 +158,15 @@ type NetworkPolicyPort struct {
EndPort *int32
}

// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed
// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
// that should not be included within this rule.
type IPBlock struct {
// CIDR is a string representing the IP Block
// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
CIDR string
// Except is a slice of CIDRs that should not be included within an IP Block
// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
// Valid examples are "192.168.1.0/24" or "2001:db8::/64"
// Except values will be rejected if they are outside the CIDR range
// +optional
Except []string
@ -423,7 +423,45 @@ type IngressTLS struct {
type IngressStatus struct {
// LoadBalancer contains the current status of the load-balancer.
// +optional
LoadBalancer api.LoadBalancerStatus
LoadBalancer IngressLoadBalancerStatus
}

// IngressLoadBalancerStatus represents the status of a load-balancer
type IngressLoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer.
// +optional
Ingress []IngressLoadBalancerIngress
}

// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based.
// +optional
IP string

// Hostname is set for load-balancer ingress points that are DNS based.
// +optional
Hostname string

// Ports provides information about the ports exposed by this LoadBalancer.
// +optional
Ports []IngressPortStatus
}

// IngressPortStatus represents the error condition of an ingress port
type IngressPortStatus struct {
// Port is the port number of the ingress port.
Port int32

// Protocol is the protocol of the ingress port.
Protocol api.Protocol

// Error indicates a problem on this port.
// The format of the error must comply with the following rules:
// - Kubernetes-defined error values use CamelCase names
// - Provider-specific error values must follow label-name style (e.g.
// example.com/name).
Error *string
}

// IngressRule represents the rules mapping the paths under a specified host to
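A small illustrative sketch of the new status shape (not part of the vendored diff), using the external k8s.io/api/networking/v1 types that mirror these internal ones in this Kubernetes version; the addresses, ports, and error value are invented.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
)

func main() {
	errMsg := "example.com/backend-unreachable" // provider-specific, label-name style

	status := networkingv1.IngressStatus{
		LoadBalancer: networkingv1.IngressLoadBalancerStatus{
			Ingress: []networkingv1.IngressLoadBalancerIngress{{
				IP: "203.0.113.10",
				Ports: []networkingv1.IngressPortStatus{
					{Port: 443, Protocol: corev1.ProtocolTCP},
					{Port: 80, Protocol: corev1.ProtocolTCP, Error: &errMsg},
				},
			}},
		},
	}
	fmt.Println(status.LoadBalancer.Ingress[0].IP)
}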
@ -628,7 +666,7 @@ type ClusterCIDRSpec struct {
|
||||
// +optional
|
||||
IPv4 string
|
||||
|
||||
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64").
|
||||
// IPv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64").
|
||||
// At least one of IPv4 and IPv6 must be specified.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
|
67
vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go
generated
vendored
67
vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go
generated
vendored
@ -374,6 +374,73 @@ func (in *IngressList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) {
|
||||
*out = *in
|
||||
if in.Ports != nil {
|
||||
in, out := &in.Ports, &out.Ports
|
||||
*out = make([]IngressPortStatus, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress.
|
||||
func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressLoadBalancerIngress)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) {
|
||||
*out = *in
|
||||
if in.Ingress != nil {
|
||||
in, out := &in.Ingress, &out.Ingress
|
||||
*out = make([]IngressLoadBalancerIngress, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus.
|
||||
func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressLoadBalancerStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) {
|
||||
*out = *in
|
||||
if in.Error != nil {
|
||||
in, out := &in.Error, &out.Error
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus.
|
||||
func (in *IngressPortStatus) DeepCopy() *IngressPortStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressPortStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
|
||||
*out = *in
|
||||
|
48
vendor/k8s.io/kubernetes/pkg/apis/policy/types.go
generated
vendored
48
vendor/k8s.io/kubernetes/pkg/apis/policy/types.go
generated
vendored
@ -42,8 +42,56 @@ type PodDisruptionBudgetSpec struct {
// by specifying 0. This is a mutually exclusive setting with "minAvailable".
// +optional
MaxUnavailable *intstr.IntOrString

// UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
// should be considered for eviction. Current implementation considers healthy pods,
// as pods that have status.conditions item with type="Ready",status="True".
//
// Valid policies are IfHealthyBudget and AlwaysAllow.
// If no policy is specified, the default behavior will be used,
// which corresponds to the IfHealthyBudget policy.
//
// IfHealthyBudget policy means that running pods (status.phase="Running"),
// but not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
//
// AlwaysAllow policy means that all running pods (status.phase="Running"),
// but not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB is met. This means perspective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
//
// Additional policies may be added in the future.
// Clients making eviction decisions should disallow eviction of unhealthy pods
// if they encounter an unrecognized policy in this field.
//
// This field is alpha-level. The eviction API uses this field when
// the feature gate PDBUnhealthyPodEvictionPolicy is enabled (disabled by default).
// +optional
UnhealthyPodEvictionPolicy *UnhealthyPodEvictionPolicyType
}

// UnhealthyPodEvictionPolicyType defines the criteria for when unhealthy pods
// should be considered for eviction.
// +enum
type UnhealthyPodEvictionPolicyType string

const (
// IfHealthyBudget policy means that running pods (status.phase="Running"),
// but not yet healthy can be evicted only if the guarded application is not
// disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
// Healthy pods will be subject to the PDB for eviction.
IfHealthyBudget UnhealthyPodEvictionPolicyType = "IfHealthyBudget"

// AlwaysAllow policy means that all running pods (status.phase="Running"),
// but not yet healthy are considered disrupted and can be evicted regardless
// of whether the criteria in a PDB is met. This means perspective running
// pods of a disrupted application might not get a chance to become healthy.
// Healthy pods will be subject to the PDB for eviction.
AlwaysAllow UnhealthyPodEvictionPolicyType = "AlwaysAllow"
)
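For illustration only (not part of the vendored sources): a PodDisruptionBudget that opts into the new policy, written with the external k8s.io/api/policy/v1 types; it only has an effect when the PDBUnhealthyPodEvictionPolicy feature gate is enabled, as the field comment above notes. Names, labels, and values are examples.

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	policy := policyv1.AlwaysAllow // evict running-but-unhealthy pods even when the budget is tight
	minAvailable := intstr.FromInt(2)

	pdb := policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "web-pdb"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable:               &minAvailable,
			Selector:                   &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			UnhealthyPodEvictionPolicy: &policy,
		},
	}
	fmt.Println(pdb.Name, *pdb.Spec.UnhealthyPodEvictionPolicy)
}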

// PodDisruptionBudgetStatus represents information about the status of a
// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatus struct {
|
5
vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/apis/policy/zz_generated.deepcopy.go
generated
vendored
@ -239,6 +239,11 @@ func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) {
*out = new(intstr.IntOrString)
**out = **in
}
if in.UnhealthyPodEvictionPolicy != nil {
in, out := &in.UnhealthyPodEvictionPolicy, &out.UnhealthyPodEvictionPolicy
*out = new(UnhealthyPodEvictionPolicyType)
**out = **in
}
return
}

379
vendor/k8s.io/kubernetes/pkg/features/kube_features.go
generated
vendored
379
vendor/k8s.io/kubernetes/pkg/features/kube_features.go
generated
vendored
@ -27,7 +27,7 @@ const (
// Every feature gate should add method here following this template:
//
// // owner: @username
// // kep: http://kep.k8s.io/NNN
// // kep: https://kep.k8s.io/NNN
// // alpha: v1.X
// MyFeature featuregate.Feature = "MyFeature"
//
@ -36,6 +36,13 @@ const (
// of code conflicts because changes are more likely to be scattered
// across the file.

// owner: @ttakahashi21 @mkimuram
// kep: https://kep.k8s.io/3294
// alpha: v1.26
//
// Enable usage of Provision of PVCs from snapshots in other namespaces
CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource"

// owner: @bswartz
// alpha: v1.18
// beta: v1.24
@ -43,6 +50,12 @@ const (
// Enables usage of any object for volume data source in PVCs
AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"

// owner: @nabokihms
// alpha: v1.26
//
// Enables API to get self subject attributes after authentication.
APISelfSubjectReview featuregate.Feature = "APISelfSubjectReview"

// owner: @tallclair
// beta: v1.4
AppArmor featuregate.Feature = "AppArmor"
@ -53,9 +66,10 @@ const (
// Enable nodes to change CPUCFSQuotaPeriod
CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod"

// owner: @ConnorDoyle
// owner: @ConnorDoyle, @fromanirh (only for GA graduation)
// alpha: v1.8
// beta: v1.10
// GA: v1.26
//
// Alternative container-level CPU affinity policies.
CPUManager featuregate.Feature = "CPUManager"
@ -128,6 +142,7 @@ const (
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.21
// GA: v1.26
//
// Enables the Azure File in-tree driver to Azure File Driver migration feature.
CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile"
@ -140,13 +155,6 @@ const (
// Enables the GCE PD in-tree driver to GCE CSI Driver migration feature.
CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE"

// owner: @adisky
// alpha: v1.14
// beta: v1.18
//
// Enables the OpenStack Cinder in-tree driver to OpenStack Cinder CSI Driver migration feature.
CSIMigrationOpenStack featuregate.Feature = "CSIMigrationOpenStack"

// owner: @trierra
// alpha: v1.23
//
@ -161,12 +169,12 @@ const (

// owner: @divyenpatel
// beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u2, HW Version: VM version 15)
//
// GA: 1.26
// Enables the vSphere in-tree driver to vSphere CSI Driver migration feature.
CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere"

// owner: @humblec, @zhucan
// kep: http://kep.k8s.io/3171
// kep: https://kep.k8s.io/3171
// alpha: v1.25
//
// Enables SecretRef field in CSI NodeExpandVolume request.
@ -186,20 +194,19 @@ const (
// Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it.
CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth"

// owner: @enj
// beta: v1.22
// ga: v1.24
//
// Allows clients to request a duration for certificates issued via the Kubernetes CSR API.
CSRDuration featuregate.Feature = "CSRDuration"

// owner: @adrianreber
// kep: http://kep.k8s.io/2008
// kep: https://kep.k8s.io/2008
// alpha: v1.25
//
// Enables container Checkpoint support in the kubelet
ContainerCheckpoint featuregate.Feature = "ContainerCheckpoint"

// owner: @bhcleek @wzshiming
// GA: v1.25
//
// Normalize HttpGet URL and Header passing for lifecycle handlers with probers.
ConsistentHTTPGetHandlers featuregate.Feature = "ConsistentHTTPGetHandlers"

// owner: @jiahuif
// alpha: v1.21
// beta: v1.22
@ -209,7 +216,7 @@ const (
ControllerManagerLeaderMigration featuregate.Feature = "ControllerManagerLeaderMigration"

// owner: @deejross, @soltysh
// kep: http://kep.k8s.io/3140
// kep: https://kep.k8s.io/3140
// alpha: v1.24
// beta: v1.25
//
@ -223,25 +230,19 @@ const (
// DaemonSets allow workloads to maintain availability during update per node
DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge"

// owner: @alculquicondor
// alpha: v1.19
// beta: v1.20
// GA: v1.24
//
// Enables the use of PodTopologySpread scheduling plugin to do default
// spreading and disables legacy SelectorSpread plugin.
DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread"

// owner: @gnufied, @verult
// owner: @gnufied, @verult, @bertinatto
// alpha: v1.22
// beta: v1.23
// GA: v1.26
// If supported by the CSI driver, delegates the role of applying FSGroup to
// the driver by passing FSGroup through the NodeStageVolume and
// NodePublishVolume calls.
DelegateFSGroupToCSIDriver featuregate.Feature = "DelegateFSGroupToCSIDriver"

// owner: @jiayingz
// owner: @jiayingz, @swatisehgal (for GA graduation)
// alpha: v1.8
// beta: v1.10
// GA: v1.26
//
// Enables support for Device Plugins
DevicePlugins featuregate.Feature = "DevicePlugins"
@ -273,16 +274,19 @@ const (
// Enables usage of hugepages-<size> in downward API.
DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"

// owner: @mtaufen
// alpha: v1.4
// beta: v1.11
// deprecated: 1.22
DynamicKubeletConfig featuregate.Feature = "DynamicKubeletConfig"
// owner: @pohly
// kep: http://kep.k8s.io/3063
// alpha: v1.26
//
// Enables support for resources with custom parameters and a lifecycle
// that is independent of a Pod.
DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"

// owner: @andrewsykim
// kep: http://kep.k8s.io/1672
// kep: https://kep.k8s.io/1672
// alpha: v1.20
// beta: v1.22
// GA: v1.26
//
// Enable Terminating condition in Endpoint Slices.
EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"
@ -295,6 +299,14 @@ const (
// Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
EphemeralContainers featuregate.Feature = "EphemeralContainers"

// owner: @harche
// kep: http://kep.k8s.io/3386
// alpha: v1.25
//
// Allows using event-driven PLEG (pod lifecycle event generator) through kubelet
// which avoids frequent relisting of containers which helps optimize performance.
EventedPLEG featuregate.Feature = "EventedPLEG"

// owner: @andrewsykim @SergeyKanzhelev
// GA: v1.20
//
@ -323,8 +335,9 @@ const (
ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes"

// owner: @gjkim42
// kep: http://kep.k8s.io/2595
// kep: https://kep.k8s.io/2595
// alpha: v1.22
// beta: v1.26
//
// Enables apiserver and kubelet to allow up to 32 DNSSearchPaths and up to 2048 DNSSearchListChars.
ExpandedDNSConfig featuregate.Feature = "ExpandedDNSConfig"
@ -338,7 +351,7 @@ const (
ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting"

// owner: @yuzhiquan, @bowei, @PxyUp, @SergeyKanzhelev
// kep: http://kep.k8s.io/2727
// kep: https://kep.k8s.io/2727
// alpha: v1.23
// beta: v1.24
//
@ -371,7 +384,7 @@ const (
HPAScaleToZero featuregate.Feature = "HPAScaleToZero"

// owner: @deepakkinni @xing-yang
// kep: http://kep.k8s.io/2680
// kep: https://kep.k8s.io/2680
// alpha: v1.23
//
// Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC
@ -434,24 +447,17 @@ const (
// Disables the vSphere in-tree driver.
InTreePluginvSphereUnregister featuregate.Feature = "InTreePluginvSphereUnregister"

// owner: @alculquicondor
// alpha: v1.21
// beta: v1.22
// stable: v1.24
//
// Allows Job controller to manage Pod completions per completion index.
IndexedJob featuregate.Feature = "IndexedJob"

// owner: @danwinship
// kep: http://kep.k8s.io/3178
// kep: https://kep.k8s.io/3178
// alpha: v1.25
//
// Causes kubelet to no longer create legacy IPTables rules
IPTablesOwnershipCleanup featuregate.Feature = "IPTablesOwnershipCleanup"

// owner: @mimowo
// kep: http://kep.k8s.io/3329
// kep: https://kep.k8s.io/3329
// alpha: v1.25
// beta: v1.26
//
// Allow users to specify handling of pod failures based on container exit codes
// and pod conditions.
@ -475,6 +481,7 @@ const (
// owner: @alculquicondor
// alpha: v1.22
// beta: v1.23
// stable: v1.26
//
// Track Job completion without relying on Pod remaining in the cluster
// indefinitely. Pod finalizers, in addition to a field in the Job status
@ -482,9 +489,10 @@ const (
// yet.
JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers"

// owner: @andrewsykim @adisky
// owner: @andrewsykim @adisky @ndixita
// alpha: v1.20
// beta: v1.24
// GA: v1.26
//
// Enable kubelet exec plugins for image pull credentials.
KubeletCredentialProviders featuregate.Feature = "KubeletCredentialProviders"
@ -511,19 +519,26 @@ const (
KubeletPodResourcesGetAllocatable featuregate.Feature = "KubeletPodResourcesGetAllocatable"

// owner: @sallyom
// kep: http://kep.k8s.io/2832
// kep: https://kep.k8s.io/2832
// alpha: v1.25
//
// Add support for distributed tracing in the kubelet
KubeletTracing featuregate.Feature = "KubeletTracing"

// owner: @zshihang
// kep: http://kep.k8s.io/2800
// kep: https://kep.k8s.io/2800
// beta: v1.24
//
// Stop auto-generation of secret-based service account tokens.
LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration"

// owner: @zshihang
// kep: http://kep.k8s.io/2800
// alpha: v1.25
//
// Enables tracking of secret-based service account tokens usage.
LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking"

// owner: @jinxu
// beta: v1.10
// stable: v1.25
@ -546,7 +561,7 @@ const (
LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown"

// owner: @denkensk
// kep: http://kep.k8s.io/3243
// kep: https://kep.k8s.io/3243
// alpha: v1.25
//
// Enable MatchLabelKeys in PodTopologySpread.
@ -565,37 +580,45 @@ const (
MemoryManager featuregate.Feature = "MemoryManager"

// owner: @xiaoxubeii
// kep: http://kep.k8s.io/2570
// kep: https://kep.k8s.io/2570
// alpha: v1.22
//
// Enables kubelet to support memory QoS with cgroups v2.
MemoryQoS featuregate.Feature = "MemoryQoS"

// owner: @sanposhiho
// kep: http://kep.k8s.io/3022
// kep: https://kep.k8s.io/3022
// alpha: v1.24
// beta: v1.25
//
// Enable MinDomains in Pod Topology Spread.
MinDomainsInPodTopologySpread featuregate.Feature = "MinDomainsInPodTopologySpread"

// owner: @danwinship
// kep: http://kep.k8s.io/3453
// alpha: v1.26
//
// Enables new performance-improving code in kube-proxy iptables mode
MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore"

// owner: @janosi @bridgetkromhout
// kep: http://kep.k8s.io/1435
// kep: https://kep.k8s.io/1435
// alpha: v1.20
// beta: v1.24
// ga: v1.26
//
// Enables the usage of different protocols in the same Service with type=LoadBalancer
MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService"

// owner: @sarveshr7
// kep: http://kep.k8s.io/2593
// kep: https://kep.k8s.io/2593
// alpha: v1.25
//
// Enables the MultiCIDR Range allocator.
MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator"

// owner: @rikatz
// kep: http://kep.k8s.io/2079
// kep: https://kep.k8s.io/2079
// alpha: v1.21
// beta: v1.22
// ga: v1.25
@ -604,15 +627,16 @@ const (
NetworkPolicyEndPort featuregate.Feature = "NetworkPolicyEndPort"

// owner: @rikatz
// kep: http://kep.k8s.io/2943
// kep: https://kep.k8s.io/2943
// alpha: v1.24
//
// Enables NetworkPolicy status subresource
NetworkPolicyStatus featuregate.Feature = "NetworkPolicyStatus"

// owner: @xing-yang @sonasingh46
// kep: http://kep.k8s.io/2268
// kep: https://kep.k8s.io/2268
// alpha: v1.24
// beta: v1.26
//
// Allow pods to failover to a different node in case of non graceful node shutdown
NodeOutOfServiceVolumeDetach featuregate.Feature = "NodeOutOfServiceVolumeDetach"
@ -623,24 +647,15 @@ const (
// Permits kubelet to run with swap enabled
NodeSwap featuregate.Feature = "NodeSwap"

// owner: @denkensk
// alpha: v1.15
// beta: v1.19
// ga: v1.24
// owner: @mortent, @atiratree, @ravig
// kep: http://kep.k8s.io/3018
// alpha: v1.26
//
// Enables NonPreempting option for priorityClass and pod.
NonPreemptingPriority featuregate.Feature = "NonPreemptingPriority"

// owner: @ahg-g
// alpha: v1.21
// beta: v1.22
// GA: v1.24
//
// Allow specifying NamespaceSelector in PodAffinityTerm.
PodAffinityNamespaceSelector featuregate.Feature = "PodAffinityNamespaceSelector"
// Enables PDBUnhealthyPodEvictionPolicy for PodDisruptionBudgets
PDBUnhealthyPodEvictionPolicy featuregate.Feature = "PDBUnhealthyPodEvictionPolicy"

// owner: @haircommander
// kep: http://kep.k8s.io/2364
// kep: https://kep.k8s.io/2364
// alpha: v1.23
//
// Configures the Kubelet to use the CRI to populate pod and container stats, instead of supplimenting with stats from cAdvisor.
@ -655,8 +670,9 @@ const (
PodDeletionCost featuregate.Feature = "PodDeletionCost"

// owner: @mimowo
// kep: http://kep.k8s.io/3329
// kep: https://kep.k8s.io/3329
// alpha: v1.25
// beta: v1.26
//
// Enables support for appending a dedicated pod condition indicating that
// the pod is being deleted due to a disruption.
@ -669,13 +685,12 @@ const (
// sandbox creation and network configuration completes successfully
PodHasNetworkCondition featuregate.Feature = "PodHasNetworkCondition"

// owner: @egernst
// alpha: v1.16
// beta: v1.18
// ga: v1.24
// owner: @Huang-Wei
// kep: https://kep.k8s.io/3521
// alpha: v1.26
//
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
PodOverhead featuregate.Feature = "PodOverhead"
// Enable users to specify when a Pod is ready for scheduling.
PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness"

// owner: @liggitt, @tallclair, sig-auth
// alpha: v1.22
@ -685,18 +700,6 @@ const (
// Enables the PodSecurity admission plugin
PodSecurity featuregate.Feature = "PodSecurity"

// owner: @chendave
// alpha: v1.21
// beta: v1.22
// GA: v1.24
//
// PreferNominatedNode tells scheduler whether the nominated node will be checked first before looping
// all the rest of nodes in the cluster.
// Enabling this feature also implies the preemptor pod might not be dispatched to the best candidate in
// some corner case, e.g. another node releases enough resources after the nominated node has been set
// and hence is the best candidate instead.
PreferNominatedNode featuregate.Feature = "PreferNominatedNode"

// owner: @ehashman
// alpha: v1.21
// beta: v1.22
@ -711,8 +714,9 @@ const (
ProcMountType featuregate.Feature = "ProcMountType"

// owner: @andrewsykim
// kep: http://kep.k8s.io/1669
// kep: https://kep.k8s.io/1669
// alpha: v1.22
// beta: v1.26
//
// Enable kube-proxy to handle terminating ednpoints when externalTrafficPolicy=Local
ProxyTerminatingEndpoints featuregate.Feature = "ProxyTerminatingEndpoints"
@ -731,14 +735,14 @@ const (
ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod"

// owner: @gnufied
// kep: http://kep.k8s.io/1790
// kep: https://kep.k8s.io/1790
// alpha: v1.23
//
// Allow users to recover from volume expansion failure
RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure"

// owner: @RomanBednar
// kep: http://kep.k8s.io/3333
// kep: https://kep.k8s.io/3333
// alpha: v1.25
//
// Allow assigning StorageClass to unbound PVCs retroactively
@ -754,45 +758,31 @@ const (
RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate"

// owner: @saschagrunert
// kep: https://kep.k8s.io/2413
// alpha: v1.22
// beta: v1.25
//
// Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads.
SeccompDefault featuregate.Feature = "SeccompDefault"

// owner: @maplain @andrewsykim
// kep: http://kep.k8s.io/2086
// kep: https://kep.k8s.io/2086
// alpha: v1.21
// beta: v1.22
// GA: v1.26
//
// Enables node-local routing for Service internal traffic
ServiceInternalTrafficPolicy featuregate.Feature = "ServiceInternalTrafficPolicy"

// owner: @aojea
// kep: http://kep.k8s.io/3070
// kep: https://kep.k8s.io/3070
// alpha: v1.24
// beta: v1.25
// ga: v1.26
//
// Subdivide the ClusterIP range for dynamic and static IP allocation.
ServiceIPStaticSubrange featuregate.Feature = "ServiceIPStaticSubrange"

// owner: @andrewsykim @uablrek
// kep: http://kep.k8s.io/1864
// alpha: v1.20
// beta: v1.22
// ga: v1.24
//
// Allows control if NodePorts shall be created for services with "type: LoadBalancer" by defining the spec.AllocateLoadBalancerNodePorts field (bool)
ServiceLBNodePortControl featuregate.Feature = "ServiceLBNodePortControl"

// owner: @andrewsykim @XudongLiuHarold
// kep: http://kep.k8s.io/1959
// alpha: v1.21
// beta: v1.22
// GA: v1.24
//
// Enable support multiple Service "type: LoadBalancer" implementations in a cluster by specifying LoadBalancerClass
ServiceLoadBalancerClass featuregate.Feature = "ServiceLoadBalancerClass"

// owner: @derekwaynecarr
// alpha: v1.20
// beta: v1.22
@ -814,16 +804,14 @@ const (
// StatefulSetMinReadySeconds allows minReadySeconds to be respected by StatefulSet controller
StatefulSetMinReadySeconds featuregate.Feature = "StatefulSetMinReadySeconds"

// owner: @adtac
// alpha: v1.21
// beta: v1.22
// GA: v1.24
// owner: @psch
// alpha: v1.26
//
// Allows jobs to be created in the suspended state.
SuspendJob featuregate.Feature = "SuspendJob"
// Enables a StatefulSet to start from an arbitrary non zero ordinal
StatefulSetStartOrdinal featuregate.Feature = "StatefulSetStartOrdinal"

// owner: @robscott
// kep: http://kep.k8s.io/2433
// kep: https://kep.k8s.io/2433
// alpha: v1.21
// beta: v1.23
//
@ -837,8 +825,36 @@ const (
// Enable resource managers to make NUMA aligned decisions
TopologyManager featuregate.Feature = "TopologyManager"

// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
// alpha: v1.26
//
// Allow fine-tuning of topology manager policies with alpha options.
// This feature gate:
// - will guard *a group* of topology manager options whose quality level is alpha.
// - will never graduate to beta or stable.
TopologyManagerPolicyAlphaOptions featuregate.Feature = "TopologyManagerPolicyAlphaOptions"

// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
// alpha: v1.26
//
// Allow fine-tuning of topology manager policies with beta options.
// This feature gate:
// - will guard *a group* of topology manager options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
TopologyManagerPolicyBetaOptions featuregate.Feature = "TopologyManagerPolicyBetaOptions"

// owner: @PiotrProkop
// kep: https://kep.k8s.io/3545
// alpha: v1.26
//
// Allow the usage of options to fine-tune the topology manager policies.
TopologyManagerPolicyOptions featuregate.Feature = "TopologyManagerPolicyOptions"

// owner: @rata, @giuseppe
// kep: http://kep.k8s.io/127
// kep: https://kep.k8s.io/127
// alpha: v1.25
//
// Enables user namespace support for stateless pods.
@ -861,23 +877,32 @@ const (
// Allows kube-proxy to run in Overlay mode for Windows
WinOverlay featuregate.Feature = "WinOverlay"

// owner: @marosset
// kep: https://kep.k8s.io/3503
// alpha: v1.26
//
// Enables support for joining Windows containers to a hosts' network namespace.
WindowsHostNetwork featuregate.Feature = "WindowsHostNetwork"

// owner: @marosset
// alpha: v1.22
// beta: v1.23
// GA: v1.26
//
// Enables support for 'HostProcess' containers on Windows nodes.
WindowsHostProcessContainers featuregate.Feature = "WindowsHostProcessContainers"

// owner: @kerthcet
// kep: http://kep.k8s.io/3094
// kep: https://kep.k8s.io/3094
// alpha: v1.25
// beta: v1.26
//
// Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when
// calculating pod topology spread skew.
NodeInclusionPolicyInPodTopologySpread featuregate.Feature = "NodeInclusionPolicyInPodTopologySpread"

// owner: @jsafrane
// kep: http://kep.k8s.io/1710
// kep: https://kep.k8s.io/1710
// alpha: v1.25
// Speed up container startup by mounting volumes with the correct SELinux label
// instead of changing each file on the volumes recursively.
@ -896,13 +921,17 @@ func init() {
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
CrossNamespaceVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},

AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24

APISelfSubjectReview: {Default: false, PreRelease: featuregate.Alpha},

AppArmor: {Default: true, PreRelease: featuregate.Beta},

CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},

CPUManager: {Default: true, PreRelease: featuregate.Beta},
CPUManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26

CPUManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},

@ -914,21 +943,19 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

CSIMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

CSIMigrationAWS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
CSIMigrationAWS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // On by default in 1.23 (requires Azure Disk CSI driver)
CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.Beta}, // On by default in 1.24 (requires Azure File CSI driver)
CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA}, // remove in 1.28

CSIMigrationGCE: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.25 (requires GCE PD CSI Driver)

CSIMigrationOpenStack: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
CSIMigrationGCE: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

CSIMigrationPortworx: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Portworx CSI driver)

CSIMigrationRBD: {Default: false, PreRelease: featuregate.Alpha}, // Off by default (requires RBD CSI driver)

CSIMigrationvSphere: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

CSINodeExpandSecret: {Default: false, PreRelease: featuregate.Alpha},

@ -936,21 +963,19 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},

CSRDuration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

ContainerCheckpoint: {Default: false, PreRelease: featuregate.Alpha},

ConsistentHTTPGetHandlers: {Default: true, PreRelease: featuregate.GA},

ControllerManagerLeaderMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

CronJobTimeZone: {Default: true, PreRelease: featuregate.Beta},

DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

DefaultPodTopologySpread: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.Beta},

DevicePlugins: {Default: true, PreRelease: featuregate.Beta},
DevicePlugins: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26

DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},

@ -960,12 +985,14 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

DownwardAPIHugePages: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22

DynamicKubeletConfig: {Default: false, PreRelease: featuregate.Deprecated}, // feature gate is deprecated in 1.22, kubelet logic is removed in 1.24, api server logic can be removed in 1.26
EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.28

EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.Beta},
DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha},

EphemeralContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

EventedPLEG: {Default: false, PreRelease: featuregate.Alpha},

ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update

ExpandCSIVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
@ -974,7 +1001,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26

ExpandedDNSConfig: {Default: false, PreRelease: featuregate.Alpha},
ExpandedDNSConfig: {Default: true, PreRelease: featuregate.Beta},

ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta},

@ -1006,19 +1033,17 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

InTreePluginvSphereUnregister: {Default: false, PreRelease: featuregate.Alpha},

IndexedJob: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

IPTablesOwnershipCleanup: {Default: false, PreRelease: featuregate.Alpha},

JobPodFailurePolicy: {Default: false, PreRelease: featuregate.Alpha},
JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},

JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.Beta},

JobReadyPods: {Default: true, PreRelease: featuregate.Beta},

JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.Beta},
JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

KubeletCredentialProviders: {Default: true, PreRelease: featuregate.Beta},
KubeletCredentialProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

KubeletInUserNamespace: {Default: false, PreRelease: featuregate.Alpha},

@ -1028,7 +1053,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

KubeletTracing: {Default: false, PreRelease: featuregate.Alpha},

LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.Beta},
LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA},

LegacyServiceAccountTokenTracking: {Default: false, PreRelease: featuregate.Alpha},

LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

@ -1046,7 +1073,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

MinDomainsInPodTopologySpread: {Default: false, PreRelease: featuregate.Beta},

MixedProtocolLBService: {Default: true, PreRelease: featuregate.Beta},
MinimizeIPTablesRestore: {Default: false, PreRelease: featuregate.Alpha},

MixedProtocolLBService: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},

@ -1054,33 +1083,29 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},

NodeOutOfServiceVolumeDetach: {Default: false, PreRelease: featuregate.Alpha},
NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.Beta},

NodeSwap: {Default: false, PreRelease: featuregate.Alpha},

NonPreemptingPriority: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

PodAffinityNamespaceSelector: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
PDBUnhealthyPodEvictionPolicy: {Default: false, PreRelease: featuregate.Alpha},

PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha},

PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},

PodDisruptionConditions: {Default: false, PreRelease: featuregate.Alpha},
PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},

PodHasNetworkCondition: {Default: false, PreRelease: featuregate.Alpha},

PodOverhead: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
PodSchedulingReadiness: {Default: false, PreRelease: featuregate.Alpha},

PodSecurity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},

PreferNominatedNode: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

ProbeTerminationGracePeriod: {Default: true, PreRelease: featuregate.Beta}, // Default to true in beta 1.25

ProcMountType: {Default: false, PreRelease: featuregate.Alpha},

ProxyTerminatingEndpoints: {Default: false, PreRelease: featuregate.Alpha},
ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.Beta},

QOSReserved: {Default: false, PreRelease: featuregate.Alpha},

@ -1088,19 +1113,15 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},

RetroactiveDefaultStorageClass: {Default: false, PreRelease: featuregate.Alpha},
RetroactiveDefaultStorageClass: {Default: true, PreRelease: featuregate.Beta},

RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},

SeccompDefault: {Default: true, PreRelease: featuregate.Beta},

ServiceIPStaticSubrange: {Default: true, PreRelease: featuregate.Beta},
ServiceIPStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

ServiceInternalTrafficPolicy: {Default: true, PreRelease: featuregate.Beta},

ServiceLBNodePortControl: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26

ServiceLoadBalancerClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
ServiceInternalTrafficPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta},

@ -1108,12 +1129,18 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

StatefulSetMinReadySeconds: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

SuspendJob: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
StatefulSetStartOrdinal: {Default: false, PreRelease: featuregate.Alpha},

TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta},

TopologyManager: {Default: true, PreRelease: featuregate.Beta},

TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},

TopologyManagerPolicyBetaOptions: {Default: false, PreRelease: featuregate.Beta},

TopologyManagerPolicyOptions: {Default: false, PreRelease: featuregate.Alpha},

VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha},

UserNamespacesStatelessPodsSupport: {Default: false, PreRelease: featuregate.Alpha},
@ -1122,15 +1149,19 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

WinOverlay: {Default: true, PreRelease: featuregate.Beta},

WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.Beta},
WindowsHostNetwork: {Default: true, PreRelease: featuregate.Alpha},

NodeInclusionPolicyInPodTopologySpread: {Default: false, PreRelease: featuregate.Alpha},
WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

NodeInclusionPolicyInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},

SELinuxMountReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},

// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:

genericfeatures.AggregatedDiscoveryEndpoint: {Default: false, PreRelease: featuregate.Alpha},

genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},

genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta},
@ -1139,15 +1170,17 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},

genericfeatures.ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha},

genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta},

genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA},
genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28

genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},

genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.Beta},

genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA},
genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29

genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.Beta},

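
Note: a quick orientation for the gate list above — components read these gates through the shared default feature gate. Below is a minimal sketch; the two gates named are just examples picked from this diff, and wiring the vendored packages into another module is left out.

// Sketch only: checking two of the gates defined above at runtime.
package main

import (
    "fmt"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
)

func main() {
    // PodDisruptionConditions moves to beta (on by default) in this update.
    fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionConditions))

    // StatefulSetStartOrdinal is new in v1.26 and still alpha (off by default).
    fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetStartOrdinal))
}
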
10
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
generated
vendored
@ -241,6 +241,10 @@ type KubeletConfiguration struct {
// Default: "container"
// +optional
TopologyManagerScope string
// TopologyManagerPolicyOptions is a set of key=value which allows to set extra options
// to fine tune the behaviour of the topology manager policies.
// Requires both the "TopologyManager" and "TopologyManagerPolicyOptions" feature gates to be enabled.
TopologyManagerPolicyOptions map[string]string
// Map of QoS resource reservation percentages (memory only for now).
// Requires the QOSReserved feature gate to be enabled.
QOSReserved map[string]string
@ -289,6 +293,7 @@ type KubeletConfiguration struct {
// serializeImagePulls when enabled, tells the Kubelet to pull images one at a time.
SerializeImagePulls bool
// Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}.
// Some default signals are Linux only: nodefs.inodesFree
EvictionHard map[string]string
// Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}.
EvictionSoft map[string]string
@ -432,7 +437,7 @@ type KubeletConfiguration struct {
// when setting the cgroupv2 memory.high value to enforce MemoryQoS.
// Decreasing this factor will set lower high limit for container cgroups and put heavier reclaim pressure
// while increasing will put less reclaim pressure.
// See http://kep.k8s.io/2570 for more details.
// See https://kep.k8s.io/2570 for more details.
// Default: 0.8
// +featureGate=MemoryQoS
// +optional
@ -446,7 +451,7 @@ type KubeletConfiguration struct {
// +optional
RegisterNode bool
// Tracing specifies the versioned configuration for OpenTelemetry tracing clients.
// See http://kep.k8s.io/2832 for more details.
// See https://kep.k8s.io/2832 for more details.
// +featureGate=KubeletTracing
// +optional
Tracing *tracingapi.TracingConfiguration
@ -595,6 +600,7 @@ type CredentialProvider struct {
// MUST use the same encoding version as the input. Current supported values are:
// - credentialprovider.kubelet.k8s.io/v1alpha1
// - credentialprovider.kubelet.k8s.io/v1beta1
// - credentialprovider.kubelet.k8s.io/v1
APIVersion string

// Arguments to pass to the command when executing it.
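
Note: the new TopologyManagerPolicyOptions field is a plain key=value map next to the existing TopologyManagerScope. Below is a minimal sketch against the internal KubeletConfiguration type shown above; the option key used is illustrative only, and real clusters would set the equivalent field in the versioned kubelet config file instead.

// Sketch only: populating the new field on the internal config type.
package main

import (
    "fmt"

    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

func main() {
    cfg := kubeletconfig.KubeletConfiguration{
        TopologyManagerScope: "container",
        // Requires the TopologyManager and TopologyManagerPolicyOptions feature gates.
        TopologyManagerPolicyOptions: map[string]string{
            "prefer-closest-numa-nodes": "true", // illustrative option key
        },
    }
    fmt.Println(cfg.TopologyManagerPolicyOptions)
}
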
7
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
7
vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
generated
vendored
@ -211,6 +211,13 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
}
}
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
if in.TopologyManagerPolicyOptions != nil {
in, out := &in.TopologyManagerPolicyOptions, &out.TopologyManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.QOSReserved != nil {
in, out := &in.QOSReserved, &out.QOSReserved
*out = make(map[string]string, len(*in))
9
vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
generated
vendored
9
vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
generated
vendored
@ -118,6 +118,15 @@ func IsZeroCIDR(cidr string) bool {
return false
}

// IsLoopBack checks if a given IP address is a loopback address.
func IsLoopBack(ip string) bool {
netIP := netutils.ParseIPSloppy(ip)
if netIP != nil {
return netIP.IsLoopback()
}
return false
}

// IsProxyableIP checks if a given IP address is permitted to be proxied
func IsProxyableIP(ip string) error {
netIP := netutils.ParseIPSloppy(ip)
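
Note: the new IsLoopBack helper is a thin wrapper around net.IP.IsLoopback using the project's lenient IP parsing; a small usage sketch with arbitrary addresses follows.

// Sketch only: exercising the IsLoopBack helper added above.
package main

import (
    "fmt"

    proxyutil "k8s.io/kubernetes/pkg/proxy/util"
)

func main() {
    fmt.Println(proxyutil.IsLoopBack("127.0.0.1")) // true
    fmt.Println(proxyutil.IsLoopBack("::1"))       // true
    fmt.Println(proxyutil.IsLoopBack("10.0.0.1"))  // false
    fmt.Println(proxyutil.IsLoopBack("not-an-ip")) // false: unparsable input returns false
}
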
75
vendor/k8s.io/kubernetes/pkg/util/slice/slice.go
generated
vendored
Normal file
75
vendor/k8s.io/kubernetes/pkg/util/slice/slice.go
generated
vendored
Normal file
@ -0,0 +1,75 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package slice provides utility methods for common operations on slices.
package slice

import (
"sort"
)

// CopyStrings copies the contents of the specified string slice
// into a new slice.
func CopyStrings(s []string) []string {
if s == nil {
return nil
}
c := make([]string, len(s))
copy(c, s)
return c
}

// SortStrings sorts the specified string slice in place. It returns the same
// slice that was provided in order to facilitate method chaining.
func SortStrings(s []string) []string {
sort.Strings(s)
return s
}

// ContainsString checks if a given slice of strings contains the provided string.
// If a modifier func is provided, it is called with the slice item before the comparation.
func ContainsString(slice []string, s string, modifier func(s string) string) bool {
for _, item := range slice {
if item == s {
return true
}
if modifier != nil && modifier(item) == s {
return true
}
}
return false
}

// RemoveString returns a newly created []string that contains all items from slice that
// are not equal to s and modifier(s) in case modifier func is provided.
func RemoveString(slice []string, s string, modifier func(s string) string) []string {
newSlice := make([]string, 0)
for _, item := range slice {
if item == s {
continue
}
if modifier != nil && modifier(item) == s {
continue
}
newSlice = append(newSlice, item)
}
if len(newSlice) == 0 {
// Sanitize for unit tests so we don't need to distinguish empty array
// and nil.
newSlice = nil
}
return newSlice
}
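
Note: since slice.go is newly vendored, here is a short usage sketch of ContainsString and RemoveString with a modifier function; the values are arbitrary.

// Sketch only: the modifier is applied to each slice item before comparison.
package main

import (
    "fmt"
    "strings"

    "k8s.io/kubernetes/pkg/util/slice"
)

func main() {
    finalizers := []string{"Example.io/Protect", "kubernetes"}

    // Case-insensitive membership check via the modifier.
    fmt.Println(slice.ContainsString(finalizers, "example.io/protect", strings.ToLower)) // true

    // RemoveString drops items equal to s, or whose modified form equals s.
    fmt.Println(slice.RemoveString(finalizers, "example.io/protect", strings.ToLower)) // [kubernetes]
}
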
69
vendor/k8s.io/kubernetes/pkg/util/taints/taints.go
generated
vendored
69
vendor/k8s.io/kubernetes/pkg/util/taints/taints.go
generated
vendored
@ -22,7 +22,6 @@ import (
"strings"

v1 "k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/apis/core/helper"
@ -125,58 +124,6 @@ func ParseTaints(spec []string) ([]v1.Taint, []v1.Taint, error) {
return taints, taintsToRemove, nil
}

// ReorganizeTaints returns the updated set of taints, taking into account old taints that were not updated,
// old taints that were updated, old taints that were deleted, and new taints.
func ReorganizeTaints(node *v1.Node, overwrite bool, taintsToAdd []v1.Taint, taintsToRemove []v1.Taint) (string, []v1.Taint, error) {
newTaints := append([]v1.Taint{}, taintsToAdd...)
oldTaints := node.Spec.Taints
// add taints that already existing but not updated to newTaints
added := addTaints(oldTaints, &newTaints)
allErrs, deleted := deleteTaints(taintsToRemove, &newTaints)
if (added && deleted) || overwrite {
return MODIFIED, newTaints, utilerrors.NewAggregate(allErrs)
} else if added {
return TAINTED, newTaints, utilerrors.NewAggregate(allErrs)
}
return UNTAINTED, newTaints, utilerrors.NewAggregate(allErrs)
}

// deleteTaints deletes the given taints from the node's taintlist.
func deleteTaints(taintsToRemove []v1.Taint, newTaints *[]v1.Taint) ([]error, bool) {
allErrs := []error{}
var removed bool
for _, taintToRemove := range taintsToRemove {
removed = false // nolint:ineffassign
if len(taintToRemove.Effect) > 0 {
*newTaints, removed = DeleteTaint(*newTaints, &taintToRemove)
} else {
*newTaints, removed = DeleteTaintsByKey(*newTaints, taintToRemove.Key)
}
if !removed {
allErrs = append(allErrs, fmt.Errorf("taint %q not found", taintToRemove.ToString()))
}
}
return allErrs, removed
}

// addTaints adds the newTaints list to existing ones and updates the newTaints List.
// TODO: This needs a rewrite to take only the new values instead of appended newTaints list to be consistent.
func addTaints(oldTaints []v1.Taint, newTaints *[]v1.Taint) bool {
for _, oldTaint := range oldTaints {
existsInNew := false
for _, taint := range *newTaints {
if taint.MatchTaint(&oldTaint) {
existsInNew = true
break
}
}
if !existsInNew {
*newTaints = append(*newTaints, oldTaint)
}
}
return len(oldTaints) != len(*newTaints)
}

// CheckIfTaintsAlreadyExists checks if the node already has taints that we want to add and returns a string with taint keys.
func CheckIfTaintsAlreadyExists(oldTaints []v1.Taint, taints []v1.Taint) string {
var existingTaintList = make([]string, 0)
@ -285,16 +232,21 @@ func TaintKeyExists(taints []v1.Taint, taintKeyToMatch string) bool {
return false
}

func TaintSetDiff(t1, t2 []v1.Taint) (taintsToAdd []*v1.Taint, taintsToRemove []*v1.Taint) {
for _, taint := range t1 {
if !TaintExists(t2, &taint) {
// TaintSetDiff finds the difference between two taint slices and
// returns all new and removed elements of the new slice relative to the old slice.
// for example:
// input: taintsNew=[a b] taintsOld=[a c]
// output: taintsToAdd=[b] taintsToRemove=[c]
func TaintSetDiff(taintsNew, taintsOld []v1.Taint) (taintsToAdd []*v1.Taint, taintsToRemove []*v1.Taint) {
for _, taint := range taintsNew {
if !TaintExists(taintsOld, &taint) {
t := taint
taintsToAdd = append(taintsToAdd, &t)
}
}

for _, taint := range t2 {
if !TaintExists(t1, &taint) {
for _, taint := range taintsOld {
if !TaintExists(taintsNew, &taint) {
t := taint
taintsToRemove = append(taintsToRemove, &t)
}
@ -303,6 +255,7 @@ func TaintSetDiff(t1, t2 []v1.Taint) (taintsToAdd []*v1.Taint, taintsToRemove []
return
}

// TaintSetFilter filters from the taint slice according to the passed fn function to get the filtered taint slice.
func TaintSetFilter(taints []v1.Taint, fn func(*v1.Taint) bool) []v1.Taint {
res := []v1.Taint{}

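
Note: the renamed TaintSetDiff parameters behave exactly as the new comment documents; a short sketch mirroring that example, with placeholder taint keys a, b and c.

// Sketch only: taintsNew=[a b] and taintsOld=[a c] yield add=[b], remove=[c].
package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/util/taints"
)

func main() {
    taintsNew := []v1.Taint{
        {Key: "a", Effect: v1.TaintEffectNoSchedule},
        {Key: "b", Effect: v1.TaintEffectNoSchedule},
    }
    taintsOld := []v1.Taint{
        {Key: "a", Effect: v1.TaintEffectNoSchedule},
        {Key: "c", Effect: v1.TaintEffectNoSchedule},
    }

    toAdd, toRemove := taints.TaintSetDiff(taintsNew, taintsOld)
    for _, t := range toAdd {
        fmt.Println("add:", t.Key) // add: b
    }
    for _, t := range toRemove {
        fmt.Println("remove:", t.Key) // remove: c
    }
}
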
2
vendor/k8s.io/kubernetes/pkg/volume/metrics_cached.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/metrics_cached.go
generated
vendored
@ -38,7 +38,7 @@ func NewCachedMetrics(provider MetricsProvider) MetricsProvider {
return &cachedMetrics{wrapped: provider}
}

// GetMetrics runs the wrapped metrics provider's GetMetrics methd once and
// GetMetrics runs the wrapped metrics provider's GetMetrics method once and
// caches the result. Will not cache result if there is an error.
// See MetricsProvider.GetMetrics
func (md *cachedMetrics) GetMetrics() (*Metrics, error) {
4
vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/noop_expandable_plugin.go
generated
vendored
@ -60,8 +60,8 @@ func (n *noopExpandableVolumePluginInstance) NewUnmounter(name string, podUID ty
return nil, nil
}

func (n *noopExpandableVolumePluginInstance) ConstructVolumeSpec(volumeName, mountPath string) (*Spec, error) {
return n.spec, nil
func (n *noopExpandableVolumePluginInstance) ConstructVolumeSpec(volumeName, mountPath string) (ReconstructedVolume, error) {
return ReconstructedVolume{Spec: n.spec}, nil
}

func (n *noopExpandableVolumePluginInstance) SupportsMountOption() bool {
37
vendor/k8s.io/kubernetes/pkg/volume/plugins.go
generated
vendored
37
vendor/k8s.io/kubernetes/pkg/volume/plugins.go
generated
vendored
@ -64,16 +64,6 @@ const (
ProbeRemove
)

var (
deprecatedVolumeProviders = map[string]string{
"kubernetes.io/cinder": "The Cinder volume provider is deprecated and will be removed in a future release",
"kubernetes.io/storageos": "The StorageOS volume provider is deprecated and will be removed in a future release",
"kubernetes.io/quobyte": "The Quobyte volume provider is deprecated and will be removed in a future release",
"kubernetes.io/flocker": "The Flocker volume provider is deprecated and will be removed in a future release",
"kubernetes.io/glusterfs": "The GlusterFS volume provider is deprecated and will be removed soon after in a subsequent release",
}
)

// VolumeOptions contains option information about a volume.
type VolumeOptions struct {
// The attributes below are required by volume.Provisioner
@ -176,7 +166,7 @@ type VolumePlugin interface {
// and volumePath. The spec may have incomplete information due to limited
// information from input. This function is used by volume manager to reconstruct
// volume spec by reading the volume directories from disk
ConstructVolumeSpec(volumeName, volumePath string) (*Spec, error)
ConstructVolumeSpec(volumeName, volumePath string) (ReconstructedVolume, error)

// SupportsMountOption returns true if volume plugins supports Mount options
// Specifying mount options in a volume plugin that doesn't support
@ -580,6 +570,16 @@ type VolumeConfig struct {
ProvisioningEnabled bool
}

// ReconstructedVolume contains information about a volume reconstructed by
// ConstructVolumeSpec().
type ReconstructedVolume struct {
// Spec is the volume spec of a mounted volume
Spec *Spec
// SELinuxMountContext is value of -o context=XYZ mount option.
// If empty, no such mount option is used.
SELinuxMountContext string
}

// NewSpecFromVolume creates an Spec from an v1.Volume
func NewSpecFromVolume(vs *v1.Volume) *Spec {
return &Spec{
@ -698,8 +698,6 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matchedPluginNames, ","))
}

// Issue warning if the matched provider is deprecated
pm.logDeprecation(match.GetPluginName())
return match, nil
}

@ -726,22 +724,9 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
if match == nil {
return nil, fmt.Errorf("no volume plugin matched name: %s", name)
}

// Issue warning if the matched provider is deprecated
pm.logDeprecation(match.GetPluginName())
return match, nil
}

// logDeprecation logs warning when a deprecated plugin is used.
func (pm *VolumePluginMgr) logDeprecation(plugin string) {
if detail, ok := deprecatedVolumeProviders[plugin]; ok && !pm.loggedDeprecationWarnings.Has(plugin) {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", plugin, detail)
// Make sure the message is logged only once. It has Warning severity
// and we don't want to spam the log too much.
pm.loggedDeprecationWarnings.Insert(plugin)
}
}

// Check if probedPlugin cache update is required.
// If it is, initialize all probed plugins and replace the cache with them.
func (pm *VolumePluginMgr) refreshProbedPlugins() {
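
Note: the ConstructVolumeSpec signature change means a plugin now wraps its reconstructed Spec in the new ReconstructedVolume struct. Below is a minimal sketch of how an implementation adapts; examplePlugin and the spec value are hypothetical, only the ReconstructedVolume wrapper comes from the diff above.

// Sketch only: adapting a plugin method to the new return type.
package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/volume"
)

type examplePlugin struct{}

func (p *examplePlugin) ConstructVolumeSpec(volumeName, volumePath string) (volume.ReconstructedVolume, error) {
    // Previously this returned (*volume.Spec, error); the spec is now wrapped so
    // reconstruction can also report the SELinux mount context, if any.
    spec := volume.NewSpecFromVolume(&v1.Volume{Name: volumeName})
    return volume.ReconstructedVolume{
        Spec:                spec,
        SELinuxMountContext: "", // empty means no -o context= mount option was used
    }, nil
}
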
2
vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/volume/util/device_util_linux.go
generated
vendored
@ -52,7 +52,7 @@ func (handler *deviceHandler) FindMultipathDeviceForDevice(device string) string
return ""
}

// findDeviceForPath Find the underlaying disk for a linked path such as /dev/disk/by-path/XXXX or /dev/mapper/XXXX
// findDeviceForPath Find the underlying disk for a linked path such as /dev/disk/by-path/XXXX or /dev/mapper/XXXX
// will return sdX or hdX etc, if /dev/sdX is passed in then sdX will be returned
func findDeviceForPath(path string, io IoUtil) (string, error) {
devicePath, err := io.EvalSymlinks(path)
3
vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs_windows.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/volume/util/fs/fs_windows.go
generated
vendored
@ -20,7 +20,6 @@ limitations under the License.
|
||||
package fs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
@ -105,7 +104,7 @@ func diskUsage(currPath string, info os.FileInfo) (int64, error) {
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() {
|
||||
s, err := diskUsage(fmt.Sprintf("%s/%s", currPath, file.Name()), file)
|
||||
s, err := diskUsage(filepath.Join(currPath, file.Name()), file)
|
||||
if err != nil {
|
||||
return size, err
|
||||
}
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go
generated
vendored
@ -116,3 +116,9 @@ func (hu *FakeHostUtil) GetSELinuxSupport(pathname string) (bool, error) {
|
||||
func (hu *FakeHostUtil) GetMode(pathname string) (os.FileMode, error) {
|
||||
return 0, errors.New("not implemented")
|
||||
}
|
||||
|
||||
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
|
||||
// given mount point.
|
||||
func (hu *FakeHostUtil) GetSELinuxMountContext(pathname string) (string, error) {
|
||||
return "", errors.New("not implemented")
|
||||
}
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil.go
generated
vendored
@ -68,6 +68,9 @@ type HostUtils interface {
|
||||
GetSELinuxSupport(pathname string) (bool, error)
|
||||
// GetMode returns permissions of the path.
|
||||
GetMode(pathname string) (os.FileMode, error)
|
||||
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
|
||||
// given mount point.
|
||||
GetSELinuxMountContext(pathname string) (string, error)
|
||||
}
|
||||
|
||||
// Compile-time check to ensure all HostUtil implementations satisfy
|
||||
|
32
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go
generated
vendored
32
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go
generated
vendored
@ -299,3 +299,35 @@ func GetModeLinux(pathname string) (os.FileMode, error) {
}
return info.Mode(), nil
}

// GetSELinuxMountContext returns value of -o context=XYZ mount option on
// given mount point.
func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
return getSELinuxMountContext(pathname, procMountInfoPath, selinux.GetEnabled)
}

// getSELinuxMountContext is the common implementation of GetSELinuxMountContext on Linux.
// Using an extra function for unit tests.
func getSELinuxMountContext(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (string, error) {
// Skip /proc/mounts parsing if SELinux is disabled.
if !selinuxEnabled() {
return "", nil
}

info, err := findMountInfo(path, mountInfoFilename)
if err != nil {
return "", err
}

for _, opt := range info.SuperOptions {
if !strings.HasPrefix(opt, "context=") {
continue
}
// Remove context=
context := strings.TrimPrefix(opt, "context=")
// Remove double quotes
context = strings.Trim(context, "\"")
return context, nil
}
return "", nil
}
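As a small illustration of the string handling above, this is how the `context=` super option is stripped down to the raw SELinux label; the option value and its quoting are invented for the example.

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Example super option as it might appear in mountinfo; the value is made up.
	opt := `context="system_u:object_r:container_file_t:s0:c123,c456"`
	// Remove the context= prefix and the surrounding double quotes, as in
	// getSELinuxMountContext above.
	context := strings.Trim(strings.TrimPrefix(opt, "context="), "\"")
	fmt.Println(context) // system_u:object_r:container_file_t:s0:c123,c456
}
```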
|
6
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_unsupported.go
generated
vendored
@ -101,3 +101,9 @@ func (hu *HostUtil) GetMode(pathname string) (os.FileMode, error) {
|
||||
func getDeviceNameFromMount(mounter mount.Interface, mountPath, pluginMountDir string) (string, error) {
|
||||
return "", errUnsupported
|
||||
}
|
||||
|
||||
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
|
||||
// given mount point.
|
||||
func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
|
||||
return "", errUnsupported
|
||||
}
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_windows.go
generated
vendored
@ -123,3 +123,9 @@ func (hu *HostUtil) GetMode(pathname string) (os.FileMode, error) {
|
||||
}
|
||||
return info.Mode(), nil
|
||||
}
|
||||
|
||||
// GetSELinuxMountContext returns value of -o context=XYZ mount option on
|
||||
// given mount point.
|
||||
func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
7
vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go
generated
vendored
7
vendor/k8s.io/kubernetes/pkg/volume/util/metrics.go
generated
vendored
@ -42,7 +42,8 @@ const (
|
||||
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
|
||||
* the metric stability policy.
|
||||
*/
|
||||
var storageOperationMetric = metrics.NewHistogramVec(
|
||||
|
||||
var StorageOperationMetric = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Name: "storage_operation_duration_seconds",
|
||||
Help: "Storage operation duration",
|
||||
@ -80,7 +81,7 @@ func init() {
|
||||
func registerMetrics() {
|
||||
// legacyregistry is the internal k8s wrapper around the prometheus
|
||||
// global registry, used specifically for metric stability enforcement
|
||||
legacyregistry.MustRegister(storageOperationMetric)
|
||||
legacyregistry.MustRegister(StorageOperationMetric)
|
||||
legacyregistry.MustRegister(storageOperationEndToEndLatencyMetric)
|
||||
legacyregistry.MustRegister(csiOperationsLatencyMetric)
|
||||
}
|
||||
@ -101,7 +102,7 @@ func OperationCompleteHook(plugin, operationName string) func(types.CompleteFunc
|
||||
if c.Migrated != nil {
|
||||
migrated = *c.Migrated
|
||||
}
|
||||
storageOperationMetric.WithLabelValues(plugin, operationName, status, strconv.FormatBool(migrated)).Observe(timeTaken)
|
||||
StorageOperationMetric.WithLabelValues(plugin, operationName, status, strconv.FormatBool(migrated)).Observe(timeTaken)
|
||||
}
|
||||
return opComplete
|
||||
}
|
||||
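Exporting the histogram as `StorageOperationMetric` lets code outside this package record storage operation durations directly. A minimal sketch of such a caller is shown below; the plugin name, operation, and helper are invented, and the label order follows the `WithLabelValues(plugin, operationName, status, migrated)` call above.

```
package caller

import (
	"strconv"
	"time"

	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// recordMount is a hypothetical helper that records one mount operation
// into the exported histogram.
func recordMount(start time.Time, migrated bool) {
	volumeutil.StorageOperationMetric.
		WithLabelValues("kubernetes.io/csi", "volume_mount", "success", strconv.FormatBool(migrated)).
		Observe(time.Since(start).Seconds())
}
```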
|
22
vendor/k8s.io/kubernetes/pkg/volume/util/nested_volumes.go
generated
vendored
22
vendor/k8s.io/kubernetes/pkg/volume/util/nested_volumes.go
generated
vendored
@ -51,8 +51,14 @@ func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
return fmt.Errorf("invalid container mount point %v", myMountPoint)
}
myMPSlash := myMountPoint + string(os.PathSeparator)
// The previously found nested mountpoint (or "" if none found yet)
prevNestedMP := ""
// The previously found nested mountpoints.
// NOTE: We can't simply rely on sort.Strings to have all the mountpoints sorted and
// grouped. For example, the following strings are sorted in this exact order:
// /dir/nested, /dir/nested-vol, /dir/nested.vol, /dir/nested/double, /dir/nested2
// The issue is a bit worse for Windows paths, since the \'s value is higher than /'s:
// \dir\nested, \dir\nested-vol, \dir\nested.vol, \dir\nested2, \dir\nested\double
// Because of this, we should use a list of previously mounted mountpoints, rather than only one.
prevNestedMPs := []string{}
// examine each mount point to see if it's nested beneath this volume
// (but skip any that are double-nested beneath this volume)
// For example, if this volume is mounted as /dir and other volumes are mounted
@ -61,11 +67,19 @@ func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
if !strings.HasPrefix(mp, myMPSlash) {
continue // skip -- not nested beneath myMountPoint
}
if prevNestedMP != "" && strings.HasPrefix(mp, prevNestedMP) {

isNested := false
for _, prevNestedMP := range prevNestedMPs {
if strings.HasPrefix(mp, prevNestedMP) {
isNested = true
break
}
}
if isNested {
continue // skip -- double nested beneath myMountPoint
}
// since this mount point is nested, remember it so that we can check that following ones aren't nested beneath this one
prevNestedMP = mp + string(os.PathSeparator)
prevNestedMPs = append(prevNestedMPs, mp+string(os.PathSeparator))
retval = append(retval, mp[len(myMPSlash):])
}
}
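The sorting caveat in the comment above is easy to reproduce. The sketch below uses the same example paths and only demonstrates why remembering a single previous mount point is not enough once `/dir/nested/double` sorts after `/dir/nested.vol`:

```
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	mps := []string{"/dir/nested/double", "/dir/nested2", "/dir/nested", "/dir/nested-vol", "/dir/nested.vol"}
	sort.Strings(mps)
	// Prints [/dir/nested /dir/nested-vol /dir/nested.vol /dir/nested/double /dir/nested2]:
	// "/dir/nested/double" is not adjacent to "/dir/nested", so checking only the
	// immediately preceding nested mount point would miss that it is double nested.
	fmt.Println(mps)

	// Keeping every nested mount point found so far, as getNestedMountpoints now does,
	// still detects it.
	prevNestedMPs := []string{"/dir/nested/"}
	fmt.Println(strings.HasPrefix("/dir/nested/double", prevNestedMPs[0])) // true
}
```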
|
5
vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go
generated
vendored
@ -54,11 +54,6 @@ type resizeProcessStatus struct {
|
||||
processed bool
|
||||
}
|
||||
|
||||
// ClaimToClaimKey return namespace/name string for pvc
|
||||
func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
|
||||
}
|
||||
|
||||
// UpdatePVSize updates just pv size after cloudprovider resizing is successful
|
||||
func UpdatePVSize(
|
||||
pv *v1.PersistentVolume,
|
||||
|
17
vendor/k8s.io/kubernetes/pkg/volume/util/storageclass.go
generated
vendored
17
vendor/k8s.io/kubernetes/pkg/volume/util/storageclass.go
generated
vendored
@ -17,9 +17,9 @@ limitations under the License.
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
storagev1listers "k8s.io/client-go/listers/storage/v1"
|
||||
@ -54,10 +54,19 @@ func GetDefaultClass(lister storagev1listers.StorageClassLister) (*storagev1.Sto
if len(defaultClasses) == 0 {
return nil, nil
}

// Primary sort by creation timestamp, newest first
// Secondary sort by class name, ascending order
sort.Slice(defaultClasses, func(i, j int) bool {
if defaultClasses[i].CreationTimestamp.UnixNano() == defaultClasses[j].CreationTimestamp.UnixNano() {
return defaultClasses[i].Name < defaultClasses[j].Name
}
return defaultClasses[i].CreationTimestamp.UnixNano() > defaultClasses[j].CreationTimestamp.UnixNano()
})
if len(defaultClasses) > 1 {
klog.V(4).Infof("GetDefaultClass %d defaults found", len(defaultClasses))
return nil, errors.NewInternalError(fmt.Errorf("%d default StorageClasses were found", len(defaultClasses)))
klog.V(4).Infof("%d default StorageClasses were found, choosing the newest: %s", len(defaultClasses), defaultClasses[0].Name)
}

return defaultClasses[0], nil
}
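Instead of failing when several StorageClasses are marked as default, the function now picks the newest one and falls back to the alphabetically first name on a timestamp tie. A small sketch of that comparator on fabricated classes; everything except the comparator itself is invented for the example.

```
package main

import (
	"fmt"
	"sort"
	"time"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.NewTime(time.Now())
	older := metav1.NewTime(now.Add(-time.Hour))
	// Fabricated default classes; only the comparator mirrors GetDefaultClass above.
	defaultClasses := []*storagev1.StorageClass{
		{ObjectMeta: metav1.ObjectMeta{Name: "slow", CreationTimestamp: older}},
		{ObjectMeta: metav1.ObjectMeta{Name: "fast", CreationTimestamp: now}},
		{ObjectMeta: metav1.ObjectMeta{Name: "alpha", CreationTimestamp: now}},
	}
	sort.Slice(defaultClasses, func(i, j int) bool {
		if defaultClasses[i].CreationTimestamp.UnixNano() == defaultClasses[j].CreationTimestamp.UnixNano() {
			return defaultClasses[i].Name < defaultClasses[j].Name
		}
		return defaultClasses[i].CreationTimestamp.UnixNano() > defaultClasses[j].CreationTimestamp.UnixNano()
	})
	fmt.Println(defaultClasses[0].Name) // "alpha": newest timestamp, name breaks the tie
}
```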
|
5
vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go
generated
vendored
5
vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_windows.go
generated
vendored
@ -114,7 +114,8 @@ func evalSymlink(path string) (string, error) {
|
||||
}
|
||||
}
|
||||
// This command will give the target path of a given symlink
|
||||
cmd := fmt.Sprintf("(Get-Item -LiteralPath %q).Target", upperpath)
|
||||
// The -Force parameter will allow Get-Item to also evaluate hidden folders, like AppData.
|
||||
cmd := fmt.Sprintf("(Get-Item -Force -LiteralPath %q).Target", upperpath)
|
||||
output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -125,7 +126,7 @@ func evalSymlink(path string) (string, error) {
|
||||
klog.V(4).Infof("Path '%s' has a target %s. Return its original form.", path, linkedPath)
|
||||
return path, nil
|
||||
}
|
||||
// If the target is not an absoluate path, join iit with the current upperpath
|
||||
// If the target is not an absolute path, join it with the current upperpath
|
||||
if !filepath.IsAbs(linkedPath) {
|
||||
linkedPath = filepath.Join(getUpperPath(upperpath), linkedPath)
|
||||
}
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/util/volumepathhandler/volume_path_handler.go
generated
vendored
@ -42,9 +42,9 @@ type BlockVolumePathHandler interface {
|
||||
UnmapDevice(mapPath string, linkName string, bindMount bool) error
|
||||
// RemovePath removes a file or directory on specified map path
|
||||
RemoveMapPath(mapPath string) error
|
||||
// IsSymlinkExist retruns true if specified symbolic link exists
|
||||
// IsSymlinkExist returns true if specified symbolic link exists
|
||||
IsSymlinkExist(mapPath string) (bool, error)
|
||||
// IsDeviceBindMountExist retruns true if specified bind mount exists
|
||||
// IsDeviceBindMountExist returns true if specified bind mount exists
|
||||
IsDeviceBindMountExist(mapPath string) (bool, error)
|
||||
// GetDeviceBindMountRefs searches bind mounts under global map path
|
||||
GetDeviceBindMountRefs(devPath string, mapPath string) ([]string, error)
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/volume/volume.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/volume/volume.go
generated
vendored
@ -189,7 +189,7 @@ type CustomBlockVolumeMapper interface {
|
||||
// MapPodDevice maps the block device to a path and return the path.
|
||||
// Unique device path across kubelet node reboot is required to avoid
|
||||
// unexpected block volume destruction.
|
||||
// If empty string is returned, the path retuned by attacher.Attach() and
|
||||
// If empty string is returned, the path returned by attacher.Attach() and
|
||||
// attacher.WaitForAttach() will be used.
|
||||
MapPodDevice() (publishPath string, err error)
|
||||
|
||||
@ -286,7 +286,7 @@ type DeviceMounter interface {
|
||||
|
||||
type BulkVolumeVerifier interface {
|
||||
// BulkVerifyVolumes checks whether the list of volumes still attached to the
|
||||
// the clusters in the node. It returns a map which maps from the volume spec to the checking result.
|
||||
// clusters in the node. It returns a map which maps from the volume spec to the checking result.
|
||||
// If an error occurs during check - error should be returned and volume on nodes
|
||||
// should be assumed as still attached.
|
||||
BulkVerifyVolumes(volumesByNode map[types.NodeName][]*Spec) (map[types.NodeName]map[*Spec]bool, error)
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions
generated
vendored
@ -8,6 +8,7 @@ rules:
|
||||
- k8s.io/kubernetes/pkg/api/v1/service
|
||||
- k8s.io/kubernetes/pkg/api/pod
|
||||
- k8s.io/kubernetes/pkg/api/node
|
||||
- k8s.io/kubernetes/pkg/api/persistentvolumeclaim
|
||||
- k8s.io/kubernetes/pkg/apis/apps
|
||||
- k8s.io/kubernetes/pkg/apis/apps/validation
|
||||
- k8s.io/kubernetes/pkg/apis/autoscaling
|
||||
@ -168,7 +169,6 @@ rules:
|
||||
- k8s.io/kubernetes/pkg/proxy/ipvs
|
||||
- k8s.io/kubernetes/pkg/proxy/metaproxier
|
||||
- k8s.io/kubernetes/pkg/proxy/metrics
|
||||
- k8s.io/kubernetes/pkg/proxy/userspace
|
||||
- k8s.io/kubernetes/pkg/proxy/util
|
||||
- k8s.io/kubernetes/pkg/registry/core/service/allocator
|
||||
- k8s.io/kubernetes/pkg/registry/core/service/portallocator
|
||||
|
88
vendor/k8s.io/kubernetes/test/e2e/framework/README.md
generated
vendored
Normal file
88
vendor/k8s.io/kubernetes/test/e2e/framework/README.md
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
||||
# Overview

The Kubernetes E2E framework simplifies writing Ginkgo test suites. Its main
usage is for the test suites in the Kubernetes repository itself:
- test/e2e: runs as client for a Kubernetes cluster. The e2e.test binary is
  used for conformance testing.
- test/e2e_node: runs on the same node as a kubelet instance. Used for testing
  kubelet.
- test/e2e_kubeadm: test suite for kubeadm.

Usage of the framework outside of Kubernetes is possible, but not encouraged.
Downstream users have to be prepared to deal with API changes.

# Code Organization

The core framework is the `k8s.io/kubernetes/test/e2e/framework` package. It
contains functionality that all E2E suites are expected to need:
- connecting to the apiserver
- managing per-test namespaces
- logging (`Logf`)
- failure handling (`Fail`, `Failf`)
- writing concise JUnit test results

It also contains a `TestContext` with settings that can be controlled via
command line flags. For historic reasons, this also contains settings for
individual tests or packages that are not part of the core framework.

Optional functionality is placed in sub packages like
`test/e2e/framework/pod`. The core framework does not depend on those. Sub
packages may depend on the core framework.

The advantages of splitting the code like this are:
- leaner go doc packages by grouping related functions together
- not forcing all E2E suites to import all functionality
- avoiding import cycles

# Execution Flow

When a test suite gets invoked, the top-level `Describe` calls register the
callbacks that define individual tests, but do not invoke them yet. After
that init phase, command line flags are parsed and the `Describe` callbacks are
invoked. Those then define the actual tests for the test suite. Command line
flags can be used to influence the test definitions.

Now `Context/BeforeEach/AfterEach/It` define code that will be called later
when executing a specific test. During this setup phase, `f :=
framework.NewDefaultFramework("some tests")` creates a `Framework` instance for
one or more tests. `NewDefaultFramework` initializes that instance anew for
each test with a `BeforeEach` callback. Starting with Kubernetes 1.26, that
instance gets cleaned up after all other code for a test has been invoked, so
the following code is correct:

```
f := framework.NewDefaultFramework("some tests")

ginkgo.AfterEach(func() {
	// Do something with f.ClientSet.
})

ginkgo.It("test something", func() {
	// The actual test.
})
```

Optional functionality can be injected into each test by adding a callback to
`NewFrameworkExtensions` in an init function. `NewDefaultFramework` will invoke
those callbacks as if the corresponding code had been added to each test like this:

```
f := framework.NewDefaultFramework("some tests")

optional.SomeCallback(f)
```
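A fuller sketch of such a registration, reusing the placeholder `optional` package and `SomeCallback` name from the example above, could look like this:

```
package optional

import (
	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	// Every test that calls framework.NewDefaultFramework also gets this callback.
	framework.NewFrameworkExtensions = append(framework.NewFrameworkExtensions, SomeCallback)
}

// SomeCallback registers extra per-test hooks against the framework instance.
// By the time they run, the framework's own BeforeEach has already initialized f.
func SomeCallback(f *framework.Framework) {
	ginkgo.BeforeEach(func() {
		framework.Logf("extension sees test %q", f.UniqueName)
	})
}
```

The order matters: `NewFramework` registers its own `ginkgo.BeforeEach` before invoking the extensions, so a `BeforeEach` added by an extension always runs after the framework's.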
`SomeCallback` can then register additional `BeforeEach` or `AfterEach`
callbacks that use the test's `Framework` instance.

When a test runs, callbacks defined for it with `BeforeEach` and `AfterEach`
are called in first-in-first-out order. Since the migration to ginkgo v2 in
Kubernetes 1.25, the `AfterEach` callback is also called when there has been a
test failure. This can be used to run cleanup code for a test
reliably. However,
[`ginkgo.DeferCleanup`](https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup)
is often a better alternative. Its callbacks are executed in first-in-last-out
order.

`test/e2e/framework/internal/unittests/cleanup/cleanup.go` shows how these
different callbacks can be used and in which order they are going to run.
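To make the ordering concrete, here is a small sketch that relies only on the guarantees described above (the suite and test names are invented): `AfterEach` runs before `DeferCleanup` callbacks, and the framework instance is torn down last.

```
package ordering

import (
	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("cleanup ordering", func() {
	f := framework.NewDefaultFramework("ordering")

	ginkgo.BeforeEach(func() {
		ginkgo.DeferCleanup(func() {
			// Runs after all AfterEach callbacks, but before the framework's own
			// teardown, so f.ClientSet is still valid here.
			framework.Logf("deferred cleanup for %q", f.UniqueName)
		})
	})

	ginkgo.AfterEach(func() {
		// Runs even when the It below failed.
		framework.Logf("per-test AfterEach")
	})

	ginkgo.It("does something", func() {
		// The actual test.
	})
})
```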
78
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go
generated
vendored
78
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go
generated
vendored
@ -1,78 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// CleanupActionHandle is an integer pointer type for handling cleanup action
|
||||
type CleanupActionHandle *int
|
||||
type cleanupFuncHandle struct {
|
||||
actionHandle CleanupActionHandle
|
||||
actionHook func()
|
||||
}
|
||||
|
||||
var cleanupActionsLock sync.Mutex
|
||||
var cleanupHookList = []cleanupFuncHandle{}
|
||||
|
||||
// AddCleanupAction installs a function that will be called in the event of the
|
||||
// whole test being terminated. This allows arbitrary pieces of the overall
|
||||
// test to hook into SynchronizedAfterSuite().
|
||||
// The hooks are called in last-in-first-out order.
|
||||
func AddCleanupAction(fn func()) CleanupActionHandle {
|
||||
p := CleanupActionHandle(new(int))
|
||||
cleanupActionsLock.Lock()
|
||||
defer cleanupActionsLock.Unlock()
|
||||
c := cleanupFuncHandle{actionHandle: p, actionHook: fn}
|
||||
cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...)
|
||||
return p
|
||||
}
|
||||
|
||||
// RemoveCleanupAction removes a function that was installed by
|
||||
// AddCleanupAction.
|
||||
func RemoveCleanupAction(p CleanupActionHandle) {
|
||||
cleanupActionsLock.Lock()
|
||||
defer cleanupActionsLock.Unlock()
|
||||
for i, item := range cleanupHookList {
|
||||
if item.actionHandle == p {
|
||||
cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
|
||||
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
|
||||
// may remove themselves.
|
||||
func RunCleanupActions() {
|
||||
list := []func(){}
|
||||
func() {
|
||||
cleanupActionsLock.Lock()
|
||||
defer cleanupActionsLock.Unlock()
|
||||
for _, p := range cleanupHookList {
|
||||
list = append(list, p.actionHook)
|
||||
}
|
||||
}()
|
||||
// Run unlocked.
|
||||
for _, fn := range list {
|
||||
Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
|
||||
fn()
|
||||
}
|
||||
}
|
187
vendor/k8s.io/kubernetes/test/e2e/framework/debug/dump.go
generated
vendored
Normal file
187
vendor/k8s.io/kubernetes/test/e2e/framework/debug/dump.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package debug
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
)
|
||||
|
||||
// EventsLister is a func that lists events.
|
||||
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
|
||||
|
||||
// dumpEventsInNamespace dumps events in the given namespace.
|
||||
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
|
||||
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
|
||||
events, err := eventsLister(metav1.ListOptions{}, namespace)
|
||||
framework.ExpectNoError(err, "failed to list events in namespace %q", namespace)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
|
||||
// Sort events by their first timestamp
|
||||
sortedEvents := events.Items
|
||||
if len(sortedEvents) > 1 {
|
||||
sort.Sort(byFirstTimestamp(sortedEvents))
|
||||
}
|
||||
for _, e := range sortedEvents {
|
||||
framework.Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
|
||||
}
|
||||
// Note that we don't wait for any Cleanup to propagate, which means
|
||||
// that if you delete a bunch of pods right before ending your test,
|
||||
// you may or may not see the killing/deletion/Cleanup events.
|
||||
}
|
||||
|
||||
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
|
||||
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
|
||||
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
|
||||
return c.CoreV1().Events(ns).List(context.TODO(), opts)
|
||||
}, namespace)
|
||||
|
||||
e2epod.DumpAllPodInfoForNamespace(c, namespace, framework.TestContext.ReportDir)
|
||||
|
||||
// If cluster is large, then the following logs are basically useless, because:
|
||||
// 1. it takes tens of minutes or hours to grab all of them
|
||||
// 2. there are so many of them that working with them is mostly impossible
|
||||
// So we dump them only if the cluster is relatively small.
|
||||
maxNodesForDump := framework.TestContext.MaxNodesToGather
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("unable to fetch node list: %v", err)
|
||||
return
|
||||
}
|
||||
if len(nodes.Items) <= maxNodesForDump {
|
||||
dumpAllNodeInfo(c, nodes)
|
||||
} else {
|
||||
framework.Logf("skipping dumping cluster info - cluster too large")
|
||||
}
|
||||
}
|
||||
|
||||
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
|
||||
type byFirstTimestamp []v1.Event
|
||||
|
||||
func (o byFirstTimestamp) Len() int { return len(o) }
|
||||
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o byFirstTimestamp) Less(i, j int) bool {
|
||||
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
|
||||
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
|
||||
}
|
||||
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
|
||||
}
|
||||
|
||||
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
|
||||
names := make([]string, len(nodes.Items))
|
||||
for ix := range nodes.Items {
|
||||
names[ix] = nodes.Items[ix].Name
|
||||
}
|
||||
DumpNodeDebugInfo(c, names, framework.Logf)
|
||||
}
|
||||
|
||||
// DumpNodeDebugInfo dumps debug information of the given nodes.
|
||||
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
|
||||
for _, n := range nodeNames {
|
||||
logFunc("\nLogging node info for node %v", n)
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
logFunc("Error getting node info %v", err)
|
||||
}
|
||||
logFunc("Node Info: %v", node)
|
||||
|
||||
logFunc("\nLogging kubelet events for node %v", n)
|
||||
for _, e := range getNodeEvents(c, n) {
|
||||
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
|
||||
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
|
||||
}
|
||||
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
|
||||
podList, err := getKubeletPods(c, n)
|
||||
if err != nil {
|
||||
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
|
||||
continue
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
|
||||
for _, c := range p.Status.InitContainerStatuses {
|
||||
logFunc("\tInit container %v ready: %v, restart count %v",
|
||||
c.Name, c.Ready, c.RestartCount)
|
||||
}
|
||||
for _, c := range p.Status.ContainerStatuses {
|
||||
logFunc("\tContainer %v ready: %v, restart count %v",
|
||||
c.Name, c.Ready, c.RestartCount)
|
||||
}
|
||||
}
|
||||
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
|
||||
// TODO: Log node resource info
|
||||
}
|
||||
}
|
||||
|
||||
// getKubeletPods retrieves the list of pods on the kubelet.
|
||||
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
|
||||
var client restclient.Result
|
||||
finished := make(chan struct{}, 1)
|
||||
go func() {
|
||||
// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
|
||||
client = c.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", node, framework.KubeletPort)).
|
||||
Suffix("pods").
|
||||
Do(context.TODO())
|
||||
|
||||
finished <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-finished:
|
||||
result := &v1.PodList{}
|
||||
if err := client.Into(result); err != nil {
|
||||
return &v1.PodList{}, err
|
||||
}
|
||||
return result, nil
|
||||
case <-time.After(framework.PodGetTimeout):
|
||||
return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", framework.PodGetTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// getNodeEvents returns kubelet events from the given node. This includes kubelet
|
||||
// restart and node unhealthy events. Note that listing events like this will mess
|
||||
// with latency metrics, beware of calling it during a test.
|
||||
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
|
||||
selector := fields.Set{
|
||||
"involvedObject.kind": "Node",
|
||||
"involvedObject.name": nodeName,
|
||||
"involvedObject.namespace": metav1.NamespaceAll,
|
||||
"source": "kubelet",
|
||||
}.AsSelector().String()
|
||||
options := metav1.ListOptions{FieldSelector: selector}
|
||||
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
framework.Logf("Unexpected error retrieving node events %v", err)
|
||||
return []v1.Event{}
|
||||
}
|
||||
return events.Items
|
||||
}
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
package debug
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -27,7 +27,7 @@ import (
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
@ -109,7 +109,7 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
|
||||
|
||||
// PrintJSON returns the summary of log size data with JSON format.
|
||||
func (s *LogsSizeDataSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
return framework.PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
// SummaryKind returns the summary of log size data summary.
|
||||
@ -158,8 +158,8 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
|
||||
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
|
||||
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
|
||||
nodeAddresses, err := e2essh.NodeSSHHosts(c)
|
||||
ExpectNoError(err)
|
||||
instanceAddress := APIAddress() + ":22"
|
||||
framework.ExpectNoError(err)
|
||||
instanceAddress := framework.APIAddress() + ":22"
|
||||
|
||||
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
|
||||
workers := make([]*LogSizeGatherer, workersNo)
|
||||
@ -256,13 +256,13 @@ func (g *LogSizeGatherer) Work() bool {
|
||||
sshResult, err := e2essh.SSH(
|
||||
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
|
||||
workItem.ip,
|
||||
TestContext.Provider,
|
||||
framework.TestContext.Provider,
|
||||
)
|
||||
if err != nil {
|
||||
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
|
||||
framework.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
|
||||
// In case of repeated error give up.
|
||||
if workItem.backoffMultiplier >= 128 {
|
||||
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
|
||||
framework.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
|
||||
g.wg.Done()
|
||||
return false
|
||||
}
|
||||
@ -278,7 +278,7 @@ func (g *LogSizeGatherer) Work() bool {
|
||||
path := results[i]
|
||||
size, err := strconv.Atoi(results[i+1])
|
||||
if err != nil {
|
||||
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
|
||||
framework.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
|
||||
continue
|
||||
}
|
||||
g.data.addNewData(workItem.ip, path, now, size)
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
package debug
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
@ -38,7 +38,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
@ -91,7 +91,7 @@ func (s *ResourceUsageSummary) PrintHumanReadable() string {
|
||||
|
||||
// PrintJSON prints resource usage summary in JSON.
|
||||
func (s *ResourceUsageSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
return framework.PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
// SummaryKind returns string of ResourceUsageSummary
|
||||
@ -198,13 +198,13 @@ func (w *resourceGatherWorker) singleProbe() {
|
||||
} else {
|
||||
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, w.probeDuration, func() []string { return w.containerIDs })
|
||||
if err != nil {
|
||||
Logf("Error while reading data from %v: %v", w.nodeName, err)
|
||||
framework.Logf("Error while reading data from %v: %v", w.nodeName, err)
|
||||
return
|
||||
}
|
||||
for k, v := range nodeUsage {
|
||||
data[k] = v
|
||||
if w.printVerboseLogs {
|
||||
Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
|
||||
framework.Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -290,13 +290,13 @@ func getOneTimeResourceUsageOnNode(
|
||||
|
||||
// getStatsSummary contacts kubelet for the container information.
|
||||
func getStatsSummary(c clientset.Interface, nodeName string) (*kubeletstatsv1alpha1.Summary, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, KubeletPort)).
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, framework.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
Do(ctx).Raw()
|
||||
|
||||
@ -322,7 +322,7 @@ func removeUint64Ptr(ptr *uint64) uint64 {
|
||||
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer w.wg.Done()
|
||||
defer Logf("Closing worker for %v", w.nodeName)
|
||||
defer framework.Logf("Closing worker for %v", w.nodeName)
|
||||
defer func() { w.finished = true }()
|
||||
select {
|
||||
case <-time.After(initialSleep):
|
||||
@ -384,7 +384,7 @@ func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, erro
|
||||
return false, err
|
||||
}
|
||||
if len(podList.Items) < 1 {
|
||||
Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
|
||||
framework.Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
|
||||
@ -422,7 +422,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
|
||||
if pods == nil {
|
||||
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Pods: %v", err)
|
||||
framework.Logf("Error while listing Pods: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@ -458,7 +458,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
|
||||
}
|
||||
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error while listing Nodes: %v", err)
|
||||
framework.Logf("Error while listing Nodes: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -510,7 +510,7 @@ func (g *ContainerResourceGatherer) StartGatheringData() {
|
||||
// specified resource constraints.
|
||||
func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
|
||||
close(g.stopCh)
|
||||
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
|
||||
framework.Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
|
||||
finished := make(chan struct{}, 1)
|
||||
go func() {
|
||||
g.workerWg.Wait()
|
||||
@ -518,7 +518,7 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
|
||||
}()
|
||||
select {
|
||||
case <-finished:
|
||||
Logf("Waitgroup finished.")
|
||||
framework.Logf("Waitgroup finished.")
|
||||
case <-time.After(2 * time.Minute):
|
||||
unfinished := make([]string, 0)
|
||||
for i := range g.workers {
|
||||
@ -526,11 +526,11 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
|
||||
unfinished = append(unfinished, g.workers[i].nodeName)
|
||||
}
|
||||
}
|
||||
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
|
||||
framework.Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
|
||||
}
|
||||
|
||||
if len(percentiles) == 0 {
|
||||
Logf("Warning! Empty percentile list for stopAndPrintData.")
|
||||
framework.Logf("Warning! Empty percentile list for stopAndPrintData.")
|
||||
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
|
||||
}
|
||||
data := make(map[int]ResourceUsagePerContainer)
|
||||
@ -604,7 +604,7 @@ type kubemarkResourceUsage struct {
|
||||
}
|
||||
|
||||
func getMasterUsageByPrefix(prefix string) (string, error) {
|
||||
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), APIAddress()+":22", TestContext.Provider)
|
||||
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), framework.APIAddress()+":22", framework.TestContext.Provider)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -617,7 +617,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
|
||||
// Get kubernetes component resource usage
|
||||
sshResult, err := getMasterUsageByPrefix("kube")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
framework.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
return nil
|
||||
}
|
||||
scanner := bufio.NewScanner(strings.NewReader(sshResult))
|
||||
@ -635,7 +635,7 @@ func getKubemarkMasterComponentsResourceUsage() map[string]*kubemarkResourceUsag
|
||||
// Get etcd resource usage
|
||||
sshResult, err = getMasterUsageByPrefix("bin/etcd")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
framework.Logf("Error when trying to SSH to master machine. Skipping probe")
|
||||
return nil
|
||||
}
|
||||
scanner = bufio.NewScanner(strings.NewReader(sshResult))
|
340
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
340
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
@ -28,7 +28,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -50,10 +49,6 @@ import (
|
||||
admissionapi "k8s.io/pod-security-admission/api"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -61,6 +56,34 @@ const (
|
||||
DefaultNamespaceDeletionTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
// NewFrameworkExtensions lists functions that get called by
|
||||
// NewFramework after constructing a new framework and after
|
||||
// calling ginkgo.BeforeEach for the framework.
|
||||
//
|
||||
// This can be used by extensions of the core framework to modify
|
||||
// settings in the framework instance or to add additional callbacks
|
||||
// with gingko.BeforeEach/AfterEach/DeferCleanup.
|
||||
//
|
||||
// When a test runs, functions will be invoked in this order:
|
||||
// - BeforeEaches defined by tests before f.NewDefaultFramework
|
||||
// in the order in which they were defined (first-in-first-out)
|
||||
// - f.BeforeEach
|
||||
// - BeforeEaches defined by tests after f.NewDefaultFramework
|
||||
// - It callback
|
||||
// - all AfterEaches in the order in which they were defined
|
||||
// - all DeferCleanups with the order reversed (first-in-last-out)
|
||||
// - f.AfterEach
|
||||
//
|
||||
// Because a test might skip test execution in a BeforeEach that runs
|
||||
// before f.BeforeEach, AfterEach callbacks that depend on the
|
||||
// framework instance must check whether it was initialized. They can
|
||||
// do that by checking f.ClientSet for nil. DeferCleanup callbacks
|
||||
// don't need to do this because they get defined when the test
|
||||
// runs.
|
||||
NewFrameworkExtensions []func(f *Framework)
|
||||
)
|
||||
|
||||
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
|
||||
// Eventual goal is to merge this with integration test framework.
|
||||
type Framework struct {
|
||||
@ -85,32 +108,9 @@ type Framework struct {
|
||||
NamespaceDeletionTimeout time.Duration
|
||||
NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied.
|
||||
|
||||
gatherer *ContainerResourceGatherer
|
||||
// Constraints that passed to a check which is executed after data is gathered to
|
||||
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
|
||||
// as expectations vary greatly. Constraints are grouped by the container names.
|
||||
AddonResourceConstraints map[string]ResourceConstraint
|
||||
|
||||
logsSizeWaitGroup sync.WaitGroup
|
||||
logsSizeCloseChannel chan bool
|
||||
logsSizeVerifier *LogsSizeVerifier
|
||||
|
||||
// Flaky operation failures in an e2e test can be captured through this.
|
||||
flakeReport *FlakeReport
|
||||
|
||||
// To make sure that this framework cleans up after itself, no matter what,
|
||||
// we install a Cleanup action before each test and clear it after. If we
|
||||
// should abort, the AfterSuite hook should run all Cleanup actions.
|
||||
cleanupHandle CleanupActionHandle
|
||||
|
||||
// afterEaches is a map of name to function to be called after each test. These are not
|
||||
// cleared. The call order is randomized so that no dependencies can grow between
|
||||
// the various afterEaches
|
||||
afterEaches map[string]AfterEachActionFunc
|
||||
|
||||
// beforeEachStarted indicates that BeforeEach has started
|
||||
beforeEachStarted bool
|
||||
|
||||
// configuration for framework's client
|
||||
Options Options
|
||||
|
||||
@ -118,15 +118,17 @@ type Framework struct {
|
||||
// or stdout if ReportDir is not set once test ends.
|
||||
TestSummaries []TestDataSummary
|
||||
|
||||
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
|
||||
clusterAutoscalerMetricsBeforeTest e2emetrics.Collection
|
||||
|
||||
// Timeouts contains the custom timeouts used during the test execution.
|
||||
Timeouts *TimeoutContext
|
||||
|
||||
// DumpAllNamespaceInfo is invoked by the framework to record
|
||||
// information about a namespace after a test failure.
|
||||
DumpAllNamespaceInfo DumpAllNamespaceInfoAction
|
||||
}
|
||||
|
||||
// AfterEachActionFunc is a function that can be called after each test
|
||||
type AfterEachActionFunc func(f *Framework, failed bool)
|
||||
// DumpAllNamespaceInfoAction is called after each failed test for namespaces
|
||||
// created for the test.
|
||||
type DumpAllNamespaceInfoAction func(f *Framework, namespace string)
|
||||
|
||||
// TestDataSummary is an interface for managing test data.
|
||||
type TestDataSummary interface {
|
||||
@ -149,8 +151,10 @@ func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *
|
||||
return f
|
||||
}
|
||||
|
||||
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
|
||||
// you (you can write additional before/after each functions).
|
||||
// NewDefaultFramework makes a new framework and sets up a BeforeEach which
|
||||
// initializes the framework instance. It cleans up with a DeferCleanup,
|
||||
// which runs last, so an AfterEach in the test still has a valid framework
|
||||
// instance.
|
||||
func NewDefaultFramework(baseName string) *Framework {
|
||||
options := Options{
|
||||
ClientQPS: 20,
|
||||
@ -162,79 +166,72 @@ func NewDefaultFramework(baseName string) *Framework {
|
||||
// NewFramework creates a test framework.
|
||||
func NewFramework(baseName string, options Options, client clientset.Interface) *Framework {
|
||||
f := &Framework{
|
||||
BaseName: baseName,
|
||||
AddonResourceConstraints: make(map[string]ResourceConstraint),
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
Timeouts: NewTimeoutContextWithDefaults(),
|
||||
BaseName: baseName,
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
Timeouts: NewTimeoutContextWithDefaults(),
|
||||
}
|
||||
|
||||
f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
|
||||
if !failed {
|
||||
return
|
||||
}
|
||||
if !TestContext.DumpLogsOnFailure {
|
||||
return
|
||||
}
|
||||
if !f.SkipNamespaceCreation {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.BeforeEach(f.BeforeEach)
|
||||
ginkgo.AfterEach(f.AfterEach)
|
||||
// The order is important here: if the extension calls ginkgo.BeforeEach
|
||||
// itself, then it can be sure that f.BeforeEach already ran when its
|
||||
// own callback gets invoked.
|
||||
ginkgo.BeforeEach(f.BeforeEach, AnnotatedLocation("set up framework"))
|
||||
for _, extension := range NewFrameworkExtensions {
|
||||
extension(f)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// BeforeEach gets a client and makes a namespace.
|
||||
func (f *Framework) BeforeEach() {
|
||||
f.beforeEachStarted = true
|
||||
// DeferCleanup, in contrast to AfterEach, triggers execution in
|
||||
// first-in-last-out order. This ensures that the framework instance
|
||||
// remains valid as long as possible.
|
||||
//
|
||||
// In addition, AfterEach will not be called if a test never gets here.
|
||||
ginkgo.DeferCleanup(f.AfterEach, AnnotatedLocation("tear down framework"))
|
||||
|
||||
// The fact that we need this feels like a bug in ginkgo.
|
||||
// https://github.com/onsi/ginkgo/v2/issues/222
|
||||
f.cleanupHandle = AddCleanupAction(f.AfterEach)
|
||||
if f.ClientSet == nil {
|
||||
ginkgo.By("Creating a kubernetes client")
|
||||
config, err := LoadConfig()
|
||||
ExpectNoError(err)
|
||||
// Registered later and thus runs before deleting namespaces.
|
||||
ginkgo.DeferCleanup(f.dumpNamespaceInfo, AnnotatedLocation("dump namespaces"))
|
||||
|
||||
config.QPS = f.Options.ClientQPS
|
||||
config.Burst = f.Options.ClientBurst
|
||||
if f.Options.GroupVersion != nil {
|
||||
config.GroupVersion = f.Options.GroupVersion
|
||||
}
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.clientConfig = rest.CopyConfig(config)
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
ginkgo.By("Creating a kubernetes client")
|
||||
config, err := LoadConfig()
|
||||
ExpectNoError(err)
|
||||
|
||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||
// as they are required when creating a REST client.
|
||||
if config.GroupVersion == nil {
|
||||
config.GroupVersion = &schema.GroupVersion{}
|
||||
}
|
||||
if config.NegotiatedSerializer == nil {
|
||||
config.NegotiatedSerializer = scheme.Codecs
|
||||
}
|
||||
restClient, err := rest.RESTClientFor(config)
|
||||
ExpectNoError(err)
|
||||
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
ExpectNoError(err)
|
||||
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
|
||||
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
|
||||
restMapper.Reset()
|
||||
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
|
||||
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
|
||||
|
||||
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
|
||||
config.QPS = f.Options.ClientQPS
|
||||
config.Burst = f.Options.ClientBurst
|
||||
if f.Options.GroupVersion != nil {
|
||||
config.GroupVersion = f.Options.GroupVersion
|
||||
}
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.clientConfig = rest.CopyConfig(config)
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
ExpectNoError(err)
|
||||
|
||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||
// as they are required when creating a REST client.
|
||||
if config.GroupVersion == nil {
|
||||
config.GroupVersion = &schema.GroupVersion{}
|
||||
}
|
||||
if config.NegotiatedSerializer == nil {
|
||||
config.NegotiatedSerializer = scheme.Codecs
|
||||
}
|
||||
restClient, err := rest.RESTClientFor(config)
|
||||
ExpectNoError(err)
|
||||
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
ExpectNoError(err)
|
||||
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
|
||||
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
|
||||
restMapper.Reset()
|
||||
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
|
||||
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
|
||||
|
||||
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
|
||||
|
||||
if !f.SkipNamespaceCreation {
|
||||
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
|
||||
@ -261,60 +258,23 @@ func (f *Framework) BeforeEach() {
|
||||
f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
|
||||
var err error
|
||||
var nodeMode NodesSet
|
||||
switch TestContext.GatherKubeSystemResourceUsageData {
|
||||
case "master":
|
||||
nodeMode = MasterNodes
|
||||
case "masteranddns":
|
||||
nodeMode = MasterAndDNSNodes
|
||||
default:
|
||||
nodeMode = AllNodes
|
||||
}
|
||||
f.flakeReport = NewFlakeReport()
|
||||
}
|
||||
|
||||
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
|
||||
InKubemark: ProviderIs("kubemark"),
|
||||
Nodes: nodeMode,
|
||||
ResourceDataGatheringPeriod: 60 * time.Second,
|
||||
ProbeDuration: 15 * time.Second,
|
||||
PrintVerboseLogs: false,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
Logf("Error while creating NewResourceUsageGatherer: %v", err)
|
||||
} else {
|
||||
go f.gatherer.StartGatheringData()
|
||||
}
|
||||
func (f *Framework) dumpNamespaceInfo() {
|
||||
if !ginkgo.CurrentSpecReport().Failed() {
|
||||
return
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
f.logsSizeWaitGroup = sync.WaitGroup{}
|
||||
f.logsSizeWaitGroup.Add(1)
|
||||
f.logsSizeCloseChannel = make(chan bool)
|
||||
f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
|
||||
go func() {
|
||||
f.logsSizeVerifier.Run()
|
||||
f.logsSizeWaitGroup.Done()
|
||||
}()
|
||||
if !TestContext.DumpLogsOnFailure {
|
||||
return
|
||||
}
|
||||
|
||||
gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
|
||||
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
|
||||
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics, false)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
|
||||
} else {
|
||||
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
Logf("Gathered ClusterAutoscaler metrics before test")
|
||||
ginkgo.By("dump namespace information after failure", func() {
|
||||
if !f.SkipNamespaceCreation {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
f.DumpAllNamespaceInfo(f, ns.Name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
f.flakeReport = NewFlakeReport()
|
||||
})
|
||||
}
|
||||
|
||||
// printSummaries prints summaries of tests.
|
||||
@ -354,29 +314,8 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
|
||||
}
|
||||
}
|
||||
|
||||
// AddAfterEach is a way to add a function to be called after every test. The execution order is intentionally random
|
||||
// to avoid growing dependencies. If you register the same name twice, it is a coding error and will panic.
|
||||
func (f *Framework) AddAfterEach(name string, fn AfterEachActionFunc) {
|
||||
if _, ok := f.afterEaches[name]; ok {
|
||||
panic(fmt.Sprintf("%q is already registered", name))
|
||||
}
|
||||
|
||||
if f.afterEaches == nil {
|
||||
f.afterEaches = map[string]AfterEachActionFunc{}
|
||||
}
|
||||
f.afterEaches[name] = fn
|
||||
}
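For illustration, a minimal hedged sketch of registering such a hook from a test suite; the framework value, hook name, and log line are assumptions rather than part of this vendored change, and the usual framework import is assumed.

// Hypothetical registration of an extra per-test hook. AddAfterEach panics if the
// same name is registered twice, so the name below is assumed to be unique.
f := framework.NewDefaultFramework("example")
f.AddAfterEach("dump-extra-diagnostics", func(f *framework.Framework, failed bool) {
	if failed {
		framework.Logf("test %q failed, collecting extra diagnostics", f.BaseName)
	}
})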
|
||||
|
||||
// AfterEach deletes the namespace, after reading its events.
|
||||
func (f *Framework) AfterEach() {
|
||||
// If BeforeEach never started AfterEach should be skipped.
|
||||
// Currently some tests under e2e/storage have this condition.
|
||||
if !f.beforeEachStarted {
|
||||
return
|
||||
}
|
||||
|
||||
RemoveCleanupAction(f.cleanupHandle)
|
||||
|
||||
// This should not happen. Given ClientSet is a public field a test must have updated it!
|
||||
// Error out early before any API calls during cleanup.
|
||||
if f.ClientSet == nil {
|
||||
@ -398,8 +337,8 @@ func (f *Framework) AfterEach() {
|
||||
nsDeletionErrors[ns.Name] = err
|
||||
|
||||
// Dump namespace if we are unable to delete the namespace and the dump was not already performed.
|
||||
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
|
||||
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
|
||||
if !ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
|
||||
f.DumpAllNamespaceInfo(f, ns.Name)
|
||||
}
|
||||
} else {
|
||||
Logf("Namespace %v was already deleted", ns.Name)
|
||||
@ -414,7 +353,9 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
}
|
||||
|
||||
// Paranoia-- prevent reuse!
|
||||
// Unsetting this is relevant for a following test that uses
|
||||
// the same instance because it might not reach f.BeforeEach
|
||||
// when some other BeforeEach skips the test first.
|
||||
f.Namespace = nil
|
||||
f.clientConfig = nil
|
||||
f.ClientSet = nil
|
||||
@ -430,42 +371,6 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
}()
|
||||
|
||||
// run all aftereach functions in random order to ensure no dependencies grow
|
||||
for _, afterEachFn := range f.afterEaches {
|
||||
afterEachFn(f, ginkgo.CurrentSpecReport().Failed())
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
|
||||
ginkgo.By("Collecting resource usage data")
|
||||
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
|
||||
defer ExpectNoError(resourceViolationError)
|
||||
f.TestSummaries = append(f.TestSummaries, summary)
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
ginkgo.By("Gathering log sizes data")
|
||||
close(f.logsSizeCloseChannel)
|
||||
f.logsSizeWaitGroup.Wait()
|
||||
f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
|
||||
}
|
||||
|
||||
if TestContext.GatherMetricsAfterTest != "false" {
|
||||
ginkgo.By("Gathering metrics")
|
||||
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
|
||||
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
|
||||
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics, false)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
received, err := grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
|
||||
}
|
||||
(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
|
||||
f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
|
||||
}
|
||||
}
|
||||
|
||||
TestContext.CloudConfig.Provider.FrameworkAfterEach(f)
|
||||
|
||||
// Report any flakes that were observed in the e2e test and reset.
|
||||
@ -475,13 +380,6 @@ func (f *Framework) AfterEach() {
|
||||
}
|
||||
|
||||
printSummaries(f.TestSummaries, f.BaseName)
|
||||
|
||||
// Check whether all nodes are ready after the test.
|
||||
// This is explicitly done at the very end of the test, to avoid
|
||||
// e.g. not removing namespace in case of this failure.
|
||||
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
|
||||
Failf("All nodes should be ready after test, %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteNamespace can be used to delete a namespace. Additionally it can be used to
|
||||
@ -510,8 +408,8 @@ func (f *Framework) DeleteNamespace(name string) {
|
||||
}
|
||||
}()
|
||||
// if current test failed then we should dump namespace information
|
||||
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure {
|
||||
DumpAllNamespaceInfo(f.ClientSet, name)
|
||||
if !f.SkipNamespaceCreation && ginkgo.CurrentSpecReport().Failed() && TestContext.DumpLogsOnFailure && f.DumpAllNamespaceInfo != nil {
|
||||
f.DumpAllNamespaceInfo(f, name)
|
||||
}
|
||||
|
||||
}
|
||||
@ -574,20 +472,6 @@ func (f *Framework) ClientConfig() *rest.Config {
|
||||
return ret
|
||||
}
|
||||
|
||||
// TestContainerOutput runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a substring matcher.
|
||||
func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
|
||||
}
|
||||
|
||||
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a regexp matcher.
|
||||
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
|
||||
}
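As a hedged illustration of these matcher helpers, the snippet below runs a throwaway pod and checks its log for a substring; the pod spec and expected string are invented for the sketch, and the usual v1/metav1 imports are assumed.

// Hypothetical test body: start a short-lived pod and assert on its container log.
pod := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "echo-test"},
	Spec: v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		Containers: []v1.Container{{
			Name:    "echo",
			Image:   "busybox",
			Command: []string{"sh", "-c", "echo hello-from-e2e"},
		}},
	},
}
// Substring match; TestContainerOutputRegexp treats each expected entry as a regexp instead.
f.TestContainerOutput("echo scenario", pod, 0, []string{"hello-from-e2e"})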
|
||||
|
||||
// KubeUser is a struct for managing kubernetes user info.
|
||||
type KubeUser struct {
|
||||
Name string `yaml:"name"`
|
||||
|
33
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go
generated
vendored
Normal file
33
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"path"

"github.com/onsi/ginkgo/v2/types"
)

// AnnotatedLocation can be used to provide more informative source code
// locations by passing the result as additional parameter to a
// BeforeEach/AfterEach/DeferCleanup/It/etc.
func AnnotatedLocation(annotation string) types.CodeLocation {
codeLocation := types.NewCodeLocation(1)
codeLocation.FileName = path.Base(codeLocation.FileName)
codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
return codeLocation
}
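A short hedged usage sketch for the helper above: Ginkgo v2 accepts a types.CodeLocation as a node decorator, which is how the comment suggests passing the result; the annotation text and setup body are illustrative only.

// Hypothetical caller: report this BeforeEach under a friendlier source location.
ginkgo.BeforeEach(func() {
	// per-test setup would go here
}, framework.AnnotatedLocation("set up test environment"))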
106
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go
generated
vendored
106
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go
generated
vendored
@ -1,106 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
|
||||
// with structured data instead of a constant string.
|
||||
package ginkgowrapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
)
|
||||
|
||||
// FailurePanic is the value that will be panicked from Fail.
|
||||
type FailurePanic struct {
|
||||
Message string // The failure message passed to Fail
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
const ginkgoFailurePanic = `
|
||||
Your test failed.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
defer GinkgoRecover()
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
|
||||
// String makes FailurePanic look like the old Ginkgo panic when printed.
|
||||
func (FailurePanic) String() string { return ginkgoFailurePanic }
|
||||
|
||||
// Fail wraps ginkgo.Fail so that it panics with more useful
|
||||
// information about the failure. This function will panic with a
|
||||
// FailurePanic.
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
fp := FailurePanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(fp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Fail(message, skip)
|
||||
}
|
||||
|
||||
// ginkgo adds a lot of test running infrastructure to the stack, so
|
||||
// we filter those out
|
||||
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)
|
||||
|
||||
func pruneStack(skip int) string {
|
||||
skip += 2 // one for pruneStack and one for debug.Stack
|
||||
stack := debug.Stack()
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
|
||||
var prunedStack []string
|
||||
|
||||
// skip the top of the stack
|
||||
for i := 0; i < 2*skip+1; i++ {
|
||||
scanner.Scan()
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
if stackSkipPattern.Match(scanner.Bytes()) {
|
||||
scanner.Scan() // these come in pairs
|
||||
} else {
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
scanner.Scan() // these come in pairs
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
191
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go
generated
vendored
Normal file
191
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubectl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
uexec "k8s.io/utils/exec"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// KubectlBuilder is used to build, customize and execute a kubectl Command.
|
||||
// Add more functions to customize the builder as needed.
|
||||
type KubectlBuilder struct {
|
||||
cmd *exec.Cmd
|
||||
timeout <-chan time.Time
|
||||
}
|
||||
|
||||
// NewKubectlCommand returns a KubectlBuilder for running kubectl.
|
||||
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
|
||||
b := new(KubectlBuilder)
|
||||
tk := NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
|
||||
b.cmd = tk.KubectlCmd(args...)
|
||||
return b
|
||||
}
|
||||
|
||||
// WithEnv sets the given environment and returns itself.
|
||||
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
|
||||
b.cmd.Env = env
|
||||
return b
|
||||
}
|
||||
|
||||
// WithTimeout sets the given timeout and returns itself.
|
||||
func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder {
|
||||
b.timeout = t
|
||||
return b
|
||||
}
|
||||
|
||||
// WithStdinData sets the given data to stdin and returns itself.
|
||||
func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder {
|
||||
b.cmd.Stdin = strings.NewReader(data)
|
||||
return &b
|
||||
}
|
||||
|
||||
// WithStdinReader sets the given reader and returns itself.
|
||||
func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
|
||||
b.cmd.Stdin = reader
|
||||
return &b
|
||||
}
|
||||
|
||||
// ExecOrDie runs the kubectl executable or dies if error occurs.
|
||||
func (b KubectlBuilder) ExecOrDie(namespace string) string {
|
||||
str, err := b.Exec()
|
||||
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
|
||||
// Note that we're still dying after retrying so that we can get visibility to triage it further.
|
||||
if isTimeout(err) {
|
||||
framework.Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
|
||||
time.Sleep(2 * time.Second)
|
||||
retryStr, retryErr := RunKubectl(namespace, "version")
|
||||
framework.Logf("stdout: %q", retryStr)
|
||||
framework.Logf("err: %v", retryErr)
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
return str
|
||||
}
|
||||
|
||||
func isTimeout(err error) bool {
|
||||
switch err := err.(type) {
|
||||
case *url.Error:
|
||||
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
|
||||
return true
|
||||
}
|
||||
case net.Error:
|
||||
if err.Timeout() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Exec runs the kubectl executable.
|
||||
func (b KubectlBuilder) Exec() (string, error) {
|
||||
stdout, _, err := b.ExecWithFullOutput()
|
||||
return stdout, err
|
||||
}
|
||||
|
||||
// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
|
||||
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd := b.cmd
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
|
||||
if err := cmd.Start(); err != nil {
|
||||
return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
|
||||
}
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- cmd.Wait()
|
||||
}()
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
var rc = 127
|
||||
if ee, ok := err.(*exec.ExitError); ok {
|
||||
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
|
||||
framework.Logf("rc: %d", rc)
|
||||
}
|
||||
return stdout.String(), stderr.String(), uexec.CodeExitError{
|
||||
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
|
||||
Code: rc,
|
||||
}
|
||||
}
|
||||
case <-b.timeout:
|
||||
b.cmd.Process.Kill()
|
||||
return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
|
||||
}
|
||||
framework.Logf("stderr: %q", stderr.String())
|
||||
framework.Logf("stdout: %q", stdout.String())
|
||||
return stdout.String(), stderr.String(), nil
|
||||
}
|
||||
|
||||
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
|
||||
func RunKubectlOrDie(namespace string, args ...string) string {
|
||||
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
|
||||
}
|
||||
|
||||
// RunKubectl is a convenience wrapper over kubectlBuilder
|
||||
func RunKubectl(namespace string, args ...string) (string, error) {
|
||||
return NewKubectlCommand(namespace, args...).Exec()
|
||||
}
|
||||
|
||||
// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder
|
||||
// It will also return the command's stderr.
|
||||
func RunKubectlWithFullOutput(namespace string, args ...string) (string, string, error) {
|
||||
return NewKubectlCommand(namespace, args...).ExecWithFullOutput()
|
||||
}
|
||||
|
||||
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
|
||||
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
|
||||
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
|
||||
}
|
||||
|
||||
// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
|
||||
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
|
||||
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
|
||||
}
|
||||
|
||||
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
|
||||
func RunKubemciWithKubeconfig(args ...string) (string, error) {
|
||||
if framework.TestContext.KubeConfig != "" {
|
||||
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+framework.TestContext.KubeConfig)
|
||||
}
|
||||
return RunKubemciCmd(args...)
|
||||
}
|
||||
|
||||
// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
|
||||
// It assumes that kubemci exists in PATH.
|
||||
func RunKubemciCmd(args ...string) (string, error) {
|
||||
// kubemci is assumed to be in PATH.
|
||||
kubemci := "kubemci"
|
||||
b := new(KubectlBuilder)
|
||||
args = append(args, "--gcp-project="+framework.TestContext.CloudConfig.ProjectID)
|
||||
|
||||
b.cmd = exec.Command(kubemci, args...)
|
||||
return b.Exec()
|
||||
}
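To show how these builders are typically combined, a hedged sketch follows; the namespace variable ns, the manifest, and the one-minute timeout are assumptions for illustration, with e2ekubectl as the conventional alias for this package.

// Hypothetical usage: create an object from stdin, then read it back with a bounded wait.
manifest := `apiVersion: v1
kind: ConfigMap
metadata:
  name: example-cm
`
e2ekubectl.RunKubectlOrDieInput(ns, manifest, "create", "-f", "-")

out, _, err := e2ekubectl.NewKubectlCommand(ns, "get", "configmap", "example-cm", "-o", "yaml").
	WithTimeout(time.After(time.Minute)).
	ExecWithFullOutput()
if err != nil {
	framework.Logf("kubectl get failed: %v", err)
} else {
	framework.Logf("kubectl get output:\n%s", out)
}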
|
16
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go
generated
vendored
16
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/kubectl_utils.go
generated
vendored
@ -29,7 +29,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
@ -109,7 +109,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
|
||||
logFunc("Running kubectl logs on non-ready containers in %v", ns)
|
||||
for _, pod := range podList.Items {
|
||||
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
|
||||
kubectlLogPod(c, pod, "", e2elog.Logf)
|
||||
kubectlLogPod(c, pod, "", framework.Logf)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -144,7 +144,7 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
|
||||
command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
|
||||
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
|
||||
if err != nil {
|
||||
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
framework.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -155,7 +155,7 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
|
||||
|
||||
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path)
|
||||
if err != nil {
|
||||
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
framework.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
@ -163,19 +163,19 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
|
||||
func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
|
||||
if numRetries > 0 {
|
||||
e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
framework.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
}
|
||||
|
||||
stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...)
|
||||
if err != nil {
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
|
||||
// Retry on "i/o timeout" errors
|
||||
e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
framework.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
continue
|
||||
}
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
|
||||
// Retry on "container not found" errors
|
||||
e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
framework.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
@ -200,7 +200,7 @@ func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName s
|
||||
cmd := tk.KubectlCmd(cmdArgs...)
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
err := cmd.Run()
|
||||
return stdout.Bytes(), stderr.Bytes(), err
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/test/e2e/framework/log.go
generated
vendored
14
vendor/k8s.io/kubernetes/test/e2e/framework/log.go
generated
vendored
@ -24,9 +24,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
|
||||
)
|
||||
|
||||
func nowStamp() string {
|
||||
@ -42,13 +40,13 @@ func Logf(format string, args ...interface{}) {
|
||||
log("INFO", format, args...)
|
||||
}
|
||||
|
||||
// Failf logs the fail info, including a stack trace starts at 2 levels above its caller
|
||||
// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "f").
|
||||
// Failf logs the fail info, including a stack trace starts with its direct caller
|
||||
// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "g").
|
||||
func Failf(format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
skip := 2
|
||||
skip := 1
|
||||
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
|
||||
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
|
||||
ginkgo.Fail(nowStamp()+": "+msg, skip)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
@ -60,7 +58,7 @@ func Fail(msg string, callerSkip ...int) {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
|
||||
e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
|
||||
ginkgo.Fail(nowStamp()+": "+msg, skip)
|
||||
}
|
||||
|
||||
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
|
||||
@ -79,7 +77,7 @@ var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
|
||||
func PrunedStack(skip int) []byte {
|
||||
fullStackTrace := debug.Stack()
|
||||
stack := bytes.Split(fullStackTrace, []byte("\n"))
|
||||
// Ensure that the even entries are the method names and the
|
||||
// Ensure that the even entries are the method names and
|
||||
// the odd entries the source code information.
|
||||
if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) {
|
||||
// Ignore "goroutine 29 [running]:" line.
|
||||
|
54
vendor/k8s.io/kubernetes/test/e2e/framework/log/logger.go
generated
vendored
54
vendor/k8s.io/kubernetes/test/e2e/framework/log/logger.go
generated
vendored
@ -1,54 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package log will be removed after switching to use core framework log.
|
||||
// Do not make further changes here!
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
|
||||
)
|
||||
|
||||
func nowStamp() string {
|
||||
return time.Now().Format(time.StampMilli)
|
||||
}
|
||||
|
||||
func log(level string, format string, args ...interface{}) {
|
||||
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
|
||||
}
|
||||
|
||||
// Logf logs the info.
|
||||
func Logf(format string, args ...interface{}) {
|
||||
log("INFO", format, args...)
|
||||
}
|
||||
|
||||
// Failf logs the fail info.
|
||||
func Failf(format string, args ...interface{}) {
|
||||
FailfWithOffset(1, format, args...)
|
||||
}
|
||||
|
||||
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
|
||||
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
|
||||
func FailfWithOffset(offset int, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
log("FAIL", msg)
|
||||
e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
|
||||
}
|
6
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/e2e_metrics.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/e2e_metrics.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/component-base/metrics/testutil"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -94,12 +94,12 @@ func (m *ComponentCollection) PrintHumanReadable() string {
|
||||
func PrettyPrintJSON(metrics interface{}) string {
|
||||
output := &bytes.Buffer{}
|
||||
if err := json.NewEncoder(output).Encode(metrics); err != nil {
|
||||
e2elog.Logf("Error building encoder: %v", err)
|
||||
framework.Logf("Error building encoder: %v", err)
|
||||
return ""
|
||||
}
|
||||
formatted := &bytes.Buffer{}
|
||||
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
|
||||
e2elog.Logf("Error indenting: %v", err)
|
||||
framework.Logf("Error indenting: %v", err)
|
||||
return ""
|
||||
}
|
||||
return string(formatted.Bytes())
|
||||
|
73
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/grab.go
generated
vendored
Normal file
73
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/grab.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
"github.com/onsi/ginkgo/v2"

"k8s.io/kubernetes/test/e2e/framework"
)

func GrabBeforeEach(f *framework.Framework) (result *Collection) {
gatherMetricsAfterTest := framework.TestContext.GatherMetricsAfterTest == "true" || framework.TestContext.GatherMetricsAfterTest == "master"
if !gatherMetricsAfterTest || !framework.TestContext.IncludeClusterAutoscalerMetrics {
return nil
}

ginkgo.By("Gathering metrics before test", func() {
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), !framework.ProviderIs("kubemark"), false, false, false, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
return
}
metrics, err := grabber.Grab()
if err != nil {
framework.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
return
}
framework.Logf("Gathered ClusterAutoscaler metrics before test")
result = &metrics
})

return
}

func GrabAfterEach(f *framework.Framework, before *Collection) {
if framework.TestContext.GatherMetricsAfterTest == "false" {
return
}

ginkgo.By("Gathering metrics after test", func() {
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
grabMetricsFromKubelets := framework.TestContext.GatherMetricsAfterTest != "master" && !framework.ProviderIs("kubemark")
grabber, err := NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, f.ClientConfig(), grabMetricsFromKubelets, true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
return
}
received, err := grabber.Grab()
if err != nil {
framework.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
return
}
if before == nil {
before = &Collection{}
}
(*ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(*before)
f.TestSummaries = append(f.TestSummaries, (*ComponentCollection)(&received))
})
}
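A hedged sketch of how a suite might wire these two helpers around each test; the framework value f and the Ginkgo nodes are assumptions, with e2emetrics as the conventional alias for this package.

// Hypothetical wiring: capture ClusterAutoscaler metrics before the test and
// report the delta afterwards via the framework's test summaries.
var beforeMetrics *e2emetrics.Collection

ginkgo.BeforeEach(func() {
	beforeMetrics = e2emetrics.GrabBeforeEach(f)
})

ginkgo.AfterEach(func() {
	e2emetrics.GrabAfterEach(f, beforeMetrics)
})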
1
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/interesting_metrics.go
generated
vendored
1
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/interesting_metrics.go
generated
vendored
@ -46,6 +46,7 @@ var interestingKubeletMetrics = []string{
"kubelet_docker_operations_errors_total",
"kubelet_docker_operations_duration_seconds",
"kubelet_pod_start_duration_seconds",
"kubelet_pod_start_sli_duration_seconds",
"kubelet_pod_worker_duration_seconds",
"kubelet_pod_worker_start_duration_seconds",
}
7
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go
generated
vendored
@ -29,7 +29,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/component-base/metrics/testutil"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -44,6 +44,8 @@ const (
|
||||
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
|
||||
podStartDurationKey = "pod_start_duration_seconds"
|
||||
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
|
||||
PodStartSLIDurationKey = "pod_start_sli_duration_seconds"
|
||||
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
|
||||
cgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
|
||||
// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
|
||||
podWorkerStartDurationKey = "pod_worker_start_duration_seconds"
|
||||
@ -175,6 +177,7 @@ func GetDefaultKubeletLatencyMetrics(ms KubeletMetrics) KubeletLatencyMetrics {
|
||||
podWorkerDurationKey,
|
||||
podWorkerStartDurationKey,
|
||||
podStartDurationKey,
|
||||
PodStartSLIDurationKey,
|
||||
cgroupManagerOperationsKey,
|
||||
dockerOperationsLatencyKey,
|
||||
podWorkerStartDurationKey,
|
||||
@ -226,7 +229,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
|
||||
for _, m := range latencyMetrics {
|
||||
if m.Latency > threshold {
|
||||
badMetrics = append(badMetrics, m)
|
||||
e2elog.Logf("%+v", m)
|
||||
framework.Logf("%+v", m)
|
||||
}
|
||||
}
|
||||
return badMetrics, nil
|
||||
|
165
vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go
generated
vendored
Normal file
165
vendor/k8s.io/kubernetes/test/e2e/framework/node/helper.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimal number of nodes for the cluster to be considered large.
|
||||
largeClusterThreshold = 100
|
||||
)
|
||||
|
||||
// WaitForAllNodesSchedulable waits up to timeout for all nodes
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
|
||||
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
|
||||
if framework.TestContext.AllowedNotReadyNodes == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, framework.TestContext.AllowedNotReadyNodes)
|
||||
return wait.PollImmediate(
|
||||
30*time.Second,
|
||||
timeout,
|
||||
CheckReadyForTests(c, framework.TestContext.NonblockingTaints, framework.TestContext.AllowedNotReadyNodes, largeClusterThreshold),
|
||||
)
|
||||
}
|
||||
|
||||
// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
|
||||
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
|
||||
framework.ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
|
||||
}
|
||||
|
||||
// ExpectNodeHasLabel expects that the given node has the given label pair.
|
||||
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
|
||||
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectEqual(node.Labels[labelKey], labelValue)
|
||||
}
|
||||
|
||||
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
|
||||
// won't fail if target label doesn't exist or has been removed.
|
||||
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
|
||||
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
|
||||
framework.ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
|
||||
|
||||
ginkgo.By("verifying the node doesn't have the label " + labelKey)
|
||||
framework.ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
|
||||
}
|
||||
|
||||
// ExpectNodeHasTaint expects that the node has the given taint.
|
||||
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
|
||||
ginkgo.By("verifying the node has the taint " + taint.ToString())
|
||||
if has, err := NodeHasTaint(c, nodeName, taint); !has {
|
||||
framework.ExpectNoError(err)
|
||||
framework.Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
// NodeHasTaint returns true if the node has the given taint, else returns false.
|
||||
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
nodeTaints := node.Spec.Taints
|
||||
|
||||
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
|
||||
// framework.TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
|
||||
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
|
||||
// and figure out how to do it in a configurable way, as we can't expect all setups to run
|
||||
// default test add-ons.
|
||||
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
|
||||
if err := allNodesReady(c, timeout); err != nil {
|
||||
return fmt.Errorf("checking for ready nodes: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func allNodesReady(c clientset.Interface, timeout time.Duration) error {
|
||||
if framework.TestContext.AllowedNotReadyNodes == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
framework.Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, framework.TestContext.AllowedNotReadyNodes)
|
||||
|
||||
var notReady []*v1.Node
|
||||
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
|
||||
notReady = nil
|
||||
// It should be OK to list unschedulable Nodes here.
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for i := range nodes.Items {
|
||||
node := &nodes.Items[i]
|
||||
if !IsConditionSetAsExpected(node, v1.NodeReady, true) {
|
||||
notReady = append(notReady, node)
|
||||
}
|
||||
}
|
||||
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
|
||||
// to make it possible e.g. for incorrect deployment of some small percentage
|
||||
// of nodes (which we allow in cluster validation). Some nodes that are not
|
||||
// provisioned correctly at startup will never become ready (e.g. when something
|
||||
// won't install correctly), so we can't expect them to be ready at any point.
|
||||
return len(notReady) <= framework.TestContext.AllowedNotReadyNodes, nil
|
||||
})
|
||||
|
||||
if err != nil && err != wait.ErrWaitTimeout {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(notReady) > framework.TestContext.AllowedNotReadyNodes {
|
||||
msg := ""
|
||||
for _, node := range notReady {
|
||||
msg = fmt.Sprintf("%s, %s", msg, node.Name)
|
||||
}
|
||||
return fmt.Errorf("Not ready nodes: %#v", msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
|
||||
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
|
||||
for _, taint := range taints {
|
||||
if taint.MatchTaint(taintToFind) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
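For orientation, a hedged sketch combining the taint and readiness helpers above; nodeName, the taint, and the timeout are illustrative, with e2enode as the conventional alias for this package.

// Hypothetical check: confirm a previously applied taint is present, then make sure
// the cluster is still healthy before continuing.
taint := &v1.Taint{Key: "example.com/dedicated", Value: "e2e", Effect: v1.TaintEffectNoSchedule}
e2enode.ExpectNodeHasTaint(f.ClientSet, nodeName, taint)
if err := e2enode.AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
	framework.Failf("nodes did not stay ready: %v", err)
}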
|
93
vendor/k8s.io/kubernetes/test/e2e/framework/node/node_killer.go
generated
vendored
Normal file
93
vendor/k8s.io/kubernetes/test/e2e/framework/node/node_killer.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
)
|
||||
|
||||
// NodeKiller is a utility to simulate node failures.
|
||||
type NodeKiller struct {
|
||||
config framework.NodeKillerConfig
|
||||
client clientset.Interface
|
||||
provider string
|
||||
}
|
||||
|
||||
// NewNodeKiller creates new NodeKiller.
|
||||
func NewNodeKiller(config framework.NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
|
||||
config.NodeKillerStopCh = make(chan struct{})
|
||||
return &NodeKiller{config, client, provider}
|
||||
}
|
||||
|
||||
// Run starts NodeKiller until stopCh is closed.
|
||||
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
|
||||
// wait.JitterUntil starts work immediately, so wait first.
|
||||
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
|
||||
wait.JitterUntil(func() {
|
||||
nodes := k.pickNodes()
|
||||
k.kill(nodes)
|
||||
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
|
||||
}
|
||||
|
||||
func (k *NodeKiller) pickNodes() []v1.Node {
|
||||
nodes, err := GetReadySchedulableNodes(k.client)
|
||||
framework.ExpectNoError(err)
|
||||
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))
|
||||
|
||||
nodes, err = GetBoundedReadySchedulableNodes(k.client, numNodes)
|
||||
framework.ExpectNoError(err)
|
||||
return nodes.Items
|
||||
}
|
||||
|
||||
func (k *NodeKiller) kill(nodes []v1.Node) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(nodes))
|
||||
for _, node := range nodes {
|
||||
node := node
|
||||
go func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
defer wg.Done()
|
||||
|
||||
framework.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
|
||||
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
|
||||
if err != nil {
|
||||
framework.Logf("ERROR while stopping node %q: %v", node.Name, err)
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(k.config.SimulatedDowntime)
|
||||
|
||||
framework.Logf("Rebooting %q to repair the node", node.Name)
|
||||
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
|
||||
if err != nil {
|
||||
framework.Logf("ERROR while rebooting node %q: %v", node.Name, err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
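A hedged sketch of driving the NodeKiller from a suite; the config values are illustrative, and the stop channel is created by the caller instead of relying on the one stored inside the config.

// Hypothetical usage: periodically take down a fraction of nodes, then stop the loop.
config := framework.NodeKillerConfig{
	FailureRatio:      0.1,
	Interval:          5 * time.Minute,
	JitterFactor:      0.5,
	SimulatedDowntime: 2 * time.Minute,
}
stopCh := make(chan struct{})
killer := e2enode.NewNodeKiller(config, f.ClientSet, framework.TestContext.Provider)
go killer.Run(stopCh)
// ... run the tests that should tolerate node failures, then:
close(stopCh)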
|
30
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go
generated
vendored
30
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go
generated
vendored
@ -40,7 +40,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientretry "k8s.io/client-go/util/retry"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
netutil "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf(msg)
|
||||
framework.Logf(msg)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -137,7 +137,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
return true
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||
}
|
||||
return false
|
||||
@ -146,7 +146,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
return true
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
|
||||
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
|
||||
}
|
||||
return false
|
||||
@ -154,7 +154,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
|
||||
|
||||
}
|
||||
if !silent {
|
||||
e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
|
||||
framework.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -196,7 +196,7 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
|
||||
func TotalRegistered(c clientset.Interface) (int, error) {
|
||||
nodes, err := waitListSchedulableNodes(c)
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to list nodes: %v", err)
|
||||
framework.Logf("Failed to list nodes: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
return len(nodes.Items), nil
|
||||
@ -206,7 +206,7 @@ func TotalRegistered(c clientset.Interface) (int, error) {
|
||||
func TotalReady(c clientset.Interface) (int, error) {
|
||||
nodes, err := waitListSchedulableNodes(c)
|
||||
if err != nil {
|
||||
e2elog.Logf("Failed to list nodes: %v", err)
|
||||
framework.Logf("Failed to list nodes: %v", err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@ -220,7 +220,7 @@ func TotalReady(c clientset.Interface) (int, error) {
|
||||
// GetExternalIP returns node external IP concatenated with port 22 for ssh
|
||||
// e.g. 1.2.3.4:22
|
||||
func GetExternalIP(node *v1.Node) (string, error) {
|
||||
e2elog.Logf("Getting external IP address for %s", node.Name)
|
||||
framework.Logf("Getting external IP address for %s", node.Name)
|
||||
host := ""
|
||||
for _, a := range node.Status.Addresses {
|
||||
if a.Type == v1.NodeExternalIP && a.Address != "" {
|
||||
@ -628,7 +628,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
|
||||
"app": appName + "-pod",
|
||||
}
|
||||
for i, node := range nodes.Items {
|
||||
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(appName+"-pod-%v", i),
|
||||
@ -884,16 +884,6 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
|
||||
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
|
||||
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
|
||||
if taintExists(nodeUpdated.Spec.Taints, taint) {
|
||||
e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
|
||||
framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
|
||||
}
|
||||
}
|
||||
|
||||
// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
|
||||
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
|
||||
for _, taint := range taints {
|
||||
if taint.MatchTaint(taintToFind) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
42
vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go
generated
vendored
Normal file
42
vendor/k8s.io/kubernetes/test/e2e/framework/node/ssh.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
"time"

"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
framework.Logf("Waiting for SSH tunnels to establish")
e2ekubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
"--command", "--",
"echo", "Hello")
defer e2ekubectl.RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")

// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
_, err := e2ekubectl.RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})
}
24
vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go
generated
vendored
24
vendor/k8s.io/kubernetes/test/e2e/framework/node/wait.go
generated
vendored
@ -27,7 +27,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const sleepTime = 20 * time.Second
|
||||
@ -47,7 +47,7 @@ func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) e
|
||||
|
||||
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
|
||||
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
|
||||
framework.Logf("Waiting up to %v for all nodes to be ready", timeout)
|
||||
|
||||
var notReady []v1.Node
|
||||
var missingPodsPerNode map[string][]string
|
||||
@ -115,11 +115,11 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
|
||||
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Couldn't get node %s", name)
framework.Logf("Couldn't get node %s", name)
continue
}

@ -127,7 +127,7 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node
return true
}
}
e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
framework.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}

@ -149,7 +149,7 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
e2elog.Logf("Failed to list nodes: %v", err)
framework.Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
@ -163,10 +163,10 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
numReady := len(nodes.Items)

if numNodes == size && numReady == size {
e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
framework.Logf("Cluster has reached the desired number of ready nodes %d", size)
return nodes.Items, nil
}
e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
framework.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
@ -215,7 +215,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
if err != nil {
var terminalListNodesErr error
e2elog.Logf("Unexpected error listing nodes: %v", err)
framework.Logf("Unexpected error listing nodes: %v", err)
if attempt >= 3 {
terminalListNodesErr = err
}
@ -236,9 +236,9 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
if len(nodesNotReadyYet) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
framework.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
for _, node := range nodesNotReadyYet {
e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
framework.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
node.Name,
IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
@ -250,7 +250,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
if len(nodesNotReadyYet) > allowedNotReadyNodes {
ready := len(allNodes.Items) - len(nodesNotReadyYet)
remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v.  Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
framework.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v.  Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
}
}
}
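For orientation, a minimal sketch of how a test might call the node helper touched above; the clientset variable and the five-minute timeout are assumptions, only WaitConditionToBe and its signature come from the diff.

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// waitNodeReady reports whether the named node reaches the Ready=true condition within five minutes.
func waitNodeReady(c clientset.Interface, name string) bool {
	return e2enode.WaitConditionToBe(c, name, v1.NodeReady, true, 5*time.Minute)
}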
161
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
161
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@ -16,56 +16,6 @@ limitations under the License.

package framework

import (
"fmt"
"os"
"path"
"sync"
"time"

"github.com/onsi/ginkgo/v2"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"

// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

const etcdImage = "3.5.5-0"

// EtcdUpgrade upgrades etcd on GCE.
func EtcdUpgrade(targetStorage, targetVersion string) error {
switch TestContext.Provider {
case "gce":
return etcdUpgradeGCE(targetStorage, targetVersion)
default:
return fmt.Errorf("EtcdUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}

func etcdUpgradeGCE(targetStorage, targetVersion string) error {
env := append(
os.Environ(),
"TEST_ETCD_VERSION="+targetVersion,
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE="+etcdImage)

_, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
return err
}

// LocationParamGKE returns parameter related to location for gcloud command.
func LocationParamGKE() string {
if TestContext.CloudConfig.MultiMaster {
// GKE Regional Clusters are being tested.
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
}
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
}

// AppendContainerCommandGroupIfNeeded returns container command group parameter if necessary.
func AppendContainerCommandGroupIfNeeded(args []string) []string {
if TestContext.CloudConfig.Region != "" {
@ -74,114 +24,3 @@ func AppendContainerCommandGroupIfNeeded(args []string) []string {
}
return args
}

// MasterUpgradeGKE upgrades master node to the specified version on GKE.
func MasterUpgradeGKE(namespace string, v string) error {
Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
LocationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}

WaitForSSHTunnels(namespace)

return nil
}

// GCEUpgradeScript returns path of script for upgrading on GCE.
func GCEUpgradeScript() string {
if len(TestContext.GCEUpgradeScript) == 0 {
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
}
return TestContext.GCEUpgradeScript
}

// WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
func WaitForSSHTunnels(namespace string) {
Logf("Waiting for SSH tunnels to establish")
RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
"--command", "--",
"echo", "Hello")
defer RunKubectl(namespace, "delete", "pod", "ssh-tunnel-test")

// allow up to a minute for new ssh tunnels to establish
wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
_, err := RunKubectl(namespace, "logs", "ssh-tunnel-test")
return err == nil, nil
})
}

// NodeKiller is a utility to simulate node failures.
type NodeKiller struct {
config NodeKillerConfig
client clientset.Interface
provider string
}

// NewNodeKiller creates new NodeKiller.
func NewNodeKiller(config NodeKillerConfig, client clientset.Interface, provider string) *NodeKiller {
config.NodeKillerStopCh = make(chan struct{})
return &NodeKiller{config, client, provider}
}

// Run starts NodeKiller until stopCh is closed.
func (k *NodeKiller) Run(stopCh <-chan struct{}) {
// wait.JitterUntil starts work immediately, so wait first.
time.Sleep(wait.Jitter(k.config.Interval, k.config.JitterFactor))
wait.JitterUntil(func() {
nodes := k.pickNodes()
k.kill(nodes)
}, k.config.Interval, k.config.JitterFactor, true, stopCh)
}

func (k *NodeKiller) pickNodes() []v1.Node {
nodes, err := e2enode.GetReadySchedulableNodes(k.client)
ExpectNoError(err)
numNodes := int(k.config.FailureRatio * float64(len(nodes.Items)))

nodes, err = e2enode.GetBoundedReadySchedulableNodes(k.client, numNodes)
ExpectNoError(err)
return nodes.Items
}

func (k *NodeKiller) kill(nodes []v1.Node) {
wg := sync.WaitGroup{}
wg.Add(len(nodes))
for _, node := range nodes {
node := node
go func() {
defer ginkgo.GinkgoRecover()
defer wg.Done()

Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
Logf("ERROR while stopping node %q: %v", node.Name, err)
return
}

time.Sleep(k.config.SimulatedDowntime)

Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
if err != nil {
Logf("ERROR while rebooting node %q: %v", node.Name, err)
return
}
}()
}
wg.Wait()
}
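A hedged sketch of how a suite might drive the NodeKiller removed from this file: the NodeKillerConfig literal and its values are illustrative assumptions; only the field names referenced in the code above (FailureRatio, Interval, JitterFactor, SimulatedDowntime) and the NewNodeKiller/Run signatures come from the diff.

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func startNodeKiller(c clientset.Interface, stop chan struct{}) {
	cfg := framework.NodeKillerConfig{
		FailureRatio:      0.1,             // fraction of ready nodes to disrupt per round (assumed value)
		Interval:          5 * time.Minute, // how often failures are injected (assumed value)
		JitterFactor:      1.0,
		SimulatedDowntime: 2 * time.Minute,
	}
	killer := framework.NewNodeKiller(cfg, c, "gce")
	// Run stops docker/kubelet over SSH on the picked nodes, sleeps, then reboots them,
	// repeating until stop is closed.
	go killer.Run(stop)
}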
13
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go
generated
vendored
13
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go
generated
vendored
@ -28,6 +28,11 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
VolumeMountPathTemplate = "/mnt/volume%d"
VolumeMountPath1 = "/mnt/volume1"
)

// Config is a struct containing all arguments for creating a pod.
// SELinux testing requires to pass HostIPC and HostPID as boolean arguments.
type Config struct {
@ -222,10 +227,11 @@ func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVol
volumeIndex := 0
for _, pvclaim := range pvcs {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
volumeMountPath := fmt.Sprintf(VolumeMountPathTemplate, volumeIndex+1)
if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: volumeMountPath})
} else {
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: volumeMountPath})
}
volumes[volumeIndex] = v1.Volume{
Name: volumename,
@ -240,8 +246,9 @@ func setVolumes(podSpec *v1.PodSpec, pvcs []*v1.PersistentVolumeClaim, inlineVol
}
for _, src := range inlineVolumeSources {
volumename := fmt.Sprintf("volume%v", volumeIndex+1)
volumeMountPath := fmt.Sprintf(VolumeMountPathTemplate, volumeIndex+1)
// In-line volumes can be only filesystem, not block.
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: volumeMountPath})
volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: *src}
volumeIndex++
}
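A small illustration of what the new constant yields; the helper name below is hypothetical, the constant and package path are the ones added above.

package example

import (
	"fmt"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// mountPathFor(2) returns "/mnt/volume2", the same string setVolumes previously built ad hoc as "/mnt/" + volumename.
func mountPathFor(index int) string {
	return fmt.Sprintf(e2epod.VolumeMountPathTemplate, index)
}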
8
vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go
generated
vendored
8
vendor/k8s.io/kubernetes/test/e2e/framework/pod/delete.go
generated
vendored
@ -27,7 +27,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)

const (
@ -59,7 +59,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@ -67,7 +67,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
}
return fmt.Errorf("pod Delete API error: %v", err)
}
e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
@ -92,7 +92,7 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64

// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {
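Callers of the delete helpers above are unaffected by the logging switch; a minimal usage sketch (the clientset, pod name, and namespace are placeholders):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func cleanup(c clientset.Interface) error {
	// Deletes the pod if it exists and waits until it is fully gone.
	return e2epod.DeletePodWithWaitByName(c, "csi-test-pod", "default")
}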
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package pod

import (
"bytes"
@ -28,6 +28,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/kubernetes/test/e2e/framework"

"github.com/onsi/gomega"
)
@ -49,16 +50,16 @@ type ExecOptions struct {
// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allows
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
func ExecWithOptions(f *framework.Framework, options ExecOptions) (string, string, error) {
if !options.Quiet {
Logf("ExecWithOptions %+v", options)
framework.Logf("ExecWithOptions %+v", options)
}
config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
config, err := framework.LoadConfig()
framework.ExpectNoError(err, "failed to load restclient config")

const tty = false

Logf("ExecWithOptions: Clientset creation")
framework.Logf("ExecWithOptions: Clientset creation")
req := f.ClientSet.CoreV1().RESTClient().Post().
Resource("pods").
Name(options.PodName).
@ -75,7 +76,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
}, scheme.ParameterCodec)

var stdout, stderr bytes.Buffer
Logf("ExecWithOptions: execute(POST %s)", req.URL())
framework.Logf("ExecWithOptions: execute(POST %s)", req.URL())
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
@ -85,8 +86,8 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)

// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and return stdout, stderr and error
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
return f.ExecWithOptions(ExecOptions{
func ExecCommandInContainerWithFullOutput(f *framework.Framework, podName, containerName string, cmd ...string) (string, string, error) {
return ExecWithOptions(f, ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: podName,
@ -99,42 +100,42 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
}

// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
Logf("Exec stderr: %q", stderr)
ExpectNoError(err,
func ExecCommandInContainer(f *framework.Framework, podName, containerName string, cmd ...string) string {
stdout, stderr, err := ExecCommandInContainerWithFullOutput(f, podName, containerName, cmd...)
framework.Logf("Exec stderr: %q", stderr)
framework.ExpectNoError(err,
"failed to execute command in pod %v, container %v: %v",
podName, containerName, err)
return stdout
}

// ExecShellInContainer executes the specified command on the pod's container.
func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
func ExecShellInContainer(f *framework.Framework, podName, containerName string, cmd string) string {
return ExecCommandInContainer(f, podName, containerName, "/bin/sh", "-c", cmd)
}

func (f *Framework) execCommandInPod(podName string, cmd ...string) string {
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod %v", podName)
func execCommandInPod(f *framework.Framework, podName string, cmd ...string) string {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
return ExecCommandInContainer(f, podName, pod.Spec.Containers[0].Name, cmd...)
}

func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod %v", podName)
func execCommandInPodWithFullOutput(f *framework.Framework, podName string, cmd ...string) (string, string, error) {
pod, err := NewPodClient(f).Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
return ExecCommandInContainerWithFullOutput(f, podName, pod.Spec.Containers[0].Name, cmd...)
}

// ExecShellInPod executes the specified command on the pod.
func (f *Framework) ExecShellInPod(podName string, cmd string) string {
return f.execCommandInPod(podName, "/bin/sh", "-c", cmd)
func ExecShellInPod(f *framework.Framework, podName string, cmd string) string {
return execCommandInPod(f, podName, "/bin/sh", "-c", cmd)
}

// ExecShellInPodWithFullOutput executes the specified command on the Pod and returns stdout, stderr and error.
func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
return f.execCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
func ExecShellInPodWithFullOutput(f *framework.Framework, podName string, cmd string) (string, string, error) {
return execCommandInPodWithFullOutput(f, podName, "/bin/sh", "-c", cmd)
}

func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
@ -142,7 +143,7 @@ func execute(method string, url *url.URL, config *restclient.Config, stdin io.Re
if err != nil {
return err
}
return exec.Stream(remotecommand.StreamOptions{
return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
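The exec helpers move from methods on *framework.Framework to package-level functions that take the framework as their first argument; a sketch of the call-site change (pod name and command are placeholders, and the e2epod import alias is an assumption):

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func runInPod(f *framework.Framework) string {
	// Before: f.ExecShellInPod("busybox", "echo hello")
	// After:  the helper lives in the e2e pod package and takes f explicitly.
	return e2epod.ExecShellInPod(f, "busybox", "echo hello")
}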
238
vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go
generated
vendored
Normal file
238
vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go
generated
vendored
Normal file
@ -0,0 +1,238 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package output

import (
"context"
"fmt"
"strings"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
apiv1pod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// DEPRECATED constants. Use the timeouts in framework.Framework instead.
const (
// Poll is how often to Poll pods, nodes and claims.
Poll = 2 * time.Second
)

// LookForStringInPodExec looks for the given string in the output of a command
// executed in the first container of specified pod.
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForStringInPodExecToContainer(ns, podName, "", command, expectedString, timeout)
}

// LookForStringInPodExecToContainer looks for the given string in the output of a
// command executed in specified pod container, or first container if not specified.
func LookForStringInPodExecToContainer(ns, podName, containerName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns)}
if len(containerName) > 0 {
args = append(args, fmt.Sprintf("--container=%s", containerName))
}
args = append(args, "--")
args = append(args, command...)
return e2ekubectl.RunKubectlOrDie(ns, args...)
})
}

// lookForString looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}

// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return e2ekubectl.RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
return e2ekubectl.RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
framework.Logf("stdout: %v", stdout)
framework.ExpectNoError(err)
return stdout
}

// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}

// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return e2ekubectl.RunKubectlOrDie(ns, "logs", podName, container)
})
}

// CreateEmptyFileOnPod creates empty file at given path on the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}

// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := e2ekubectl.RunKubectl(ns, "describe", "po", s.Name)
framework.Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)

l, _ := e2ekubectl.RunKubectl(ns, "logs", s.Name, "--tail=100")
framework.Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}

// MatchContainerOutput creates a pod and waits for all its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func MatchContainerOutput(
f *framework.Framework,
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := e2epod.PodClientNS(f, ns)

createdPod := podClient.Create(pod)
defer func() {
ginkgo.By("delete the pod")
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
}()

// Wait for client pod to complete.
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)

// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}

if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
_ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool {
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
} else {
framework.Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
}
return true
})
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}

framework.Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)

// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}

for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}

return nil
}

// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
func TestContainerOutput(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.ContainSubstring)
}

// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
func TestContainerOutputRegexp(f *framework.Framework, scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
TestContainerOutputMatcher(f, scenarioName, pod, containerIndex, expectedOutput, gomega.MatchRegexp)
}

// TestContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func TestContainerOutputMatcher(f *framework.Framework,
scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
framework.Failf("Invalid container index: %d", containerIndex)
}
framework.ExpectNoError(MatchContainerOutput(f, pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
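A brief usage sketch for the new output helpers added above; the import alias, namespace, pod, container, and expected string are placeholders.

package example

import (
	"time"

	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
)

func waitForLogLine(ns, pod, container string) (string, error) {
	// Polls `kubectl logs` every Poll interval until the string appears or the timeout expires.
	return e2eoutput.LookForStringInLog(ns, pod, container, "ready to serve", 2*time.Minute)
}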
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package pod

import (
"context"
@ -39,9 +39,9 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"

// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
"k8s.io/kubernetes/pkg/kubelet/util/format"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/e2e/framework"
)

const (
@ -66,10 +66,10 @@ const (
// node e2e test.
var ImagePrePullList sets.String

// PodClient is a convenience method for getting a pod client interface in the framework's namespace,
// NewPodClient is a convenience method for getting a pod client interface in the framework's namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClient() *PodClient {
func NewPodClient(f *framework.Framework) *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
@ -79,7 +79,7 @@ func (f *Framework) PodClient() *PodClient {
// PodClientNS is a convenience method for getting a pod client interface in an alternative namespace,
// possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClientNS(namespace string) *PodClient {
func PodClientNS(f *framework.Framework, namespace string) *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.CoreV1().Pods(namespace),
@ -88,7 +88,7 @@ func (f *Framework) PodClientNS(namespace string) *PodClient {

// PodClient is a struct for pod client.
type PodClient struct {
f *Framework
f *framework.Framework
v1core.PodInterface
}

@ -96,7 +96,7 @@ type PodClient struct {
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{})
ExpectNoError(err, "Error creating Pod")
framework.ExpectNoError(err, "Error creating Pod")
return p
}

@ -104,10 +104,10 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
namespace := c.f.Namespace.Name
p := c.Create(pod)
ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, PodStartTimeout))
framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
ExpectNoError(err)
framework.ExpectNoError(err)
return p
}

@ -131,7 +131,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
// there is any other apierrors. name is the pod name, updateFn is the function updating the
// pod object.
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
@ -139,11 +139,11 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
updateFn(pod)
_, err = c.PodInterface.Update(context.TODO(), pod, metav1.UpdateOptions{})
if err == nil {
Logf("Successfully updated pod %q", name)
framework.Logf("Successfully updated pod %q", name)
return true, nil
}
if apierrors.IsConflict(err) {
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
@ -155,22 +155,22 @@ func (c *PodClient) AddEphemeralContainerSync(pod *v1.Pod, ec *v1.EphemeralConta
namespace := c.f.Namespace.Name

podJS, err := json.Marshal(pod)
ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
framework.ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))

ecPod := pod.DeepCopy()
ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
ecJS, err := json.Marshal(ecPod)
ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
framework.ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))

patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))

// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
if _, err := c.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
return err
}

ExpectNoError(e2epod.WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
framework.ExpectNoError(WaitForContainerRunning(c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
return nil
}

@ -180,27 +180,27 @@ func (c *PodClient) DeleteSync(name string, options metav1.DeleteOptions, timeou
namespace := c.f.Namespace.Name
err := c.Delete(context.TODO(), name, options)
if err != nil && !apierrors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
framework.Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
}

// mungeSpec applies test-suite specific transformations to the pod spec.
func (c *PodClient) mungeSpec(pod *v1.Pod) {
if !TestContext.NodeE2E {
if !framework.TestContext.NodeE2E {
return
}

gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(TestContext.NodeName)), "Test misconfigured")
pod.Spec.NodeName = TestContext.NodeName
gomega.Expect(pod.Spec.NodeName).To(gomega.Or(gomega.BeZero(), gomega.Equal(framework.TestContext.NodeName)), "Test misconfigured")
pod.Spec.NodeName = framework.TestContext.NodeName
// Node e2e does not support the default DNSClusterFirst policy. Set
// the policy to DNSDefault, which is configured per node.
pod.Spec.DNSPolicy = v1.DNSDefault

// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
// we should not munge ImagePullPolicy for cluster e2e pods.
if !TestContext.PrepullImages {
if !framework.TestContext.PrepullImages {
return
}
// If prepull is enabled, munge the container spec to make sure the images are not pulled
@ -226,7 +226,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -243,7 +243,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
f := c.f
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -260,7 +260,7 @@ func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
// WaitForErrorEventOrSuccess waits for pod to succeed or an error event for that pod.
func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(Poll, PodStartTimeout, func() (bool, error) {
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
@ -284,7 +284,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
// MatchContainerOutput gets output of a container and match expected regexp in the output.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}
@ -301,6 +301,14 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
// PodIsReady returns true if the specified pod is ready. Otherwise false.
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
ExpectNoError(err)
framework.ExpectNoError(err)
return podutils.IsPodReady(pod)
}

// RemovePodFinalizer removes the pod's finalizer
func (c *PodClient) RemoveFinalizer(podName string, finalizerName string) {
framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName)
c.Update(podName, func(pod *v1.Pod) {
pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil)
})
}
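PodClient construction likewise becomes a package-level helper; a sketch of the call-site change (the e2epod alias is an assumption, NewPodClient and CreateSync come from the diff above):

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func createSync(f *framework.Framework, pod *v1.Pod) *v1.Pod {
	// Before: f.PodClient().CreateSync(pod)
	// After:  the client is built by the e2e pod package and takes f explicitly.
	return e2epod.NewPodClient(f).CreateSync(pod)
}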
77
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
77
vendor/k8s.io/kubernetes/test/e2e/framework/pod/resource.go
generated
vendored
@ -18,6 +18,7 @@ package pod

import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -34,14 +35,18 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"

e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = fmt.Errorf("pod ran to completion")
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))

// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failure state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))

// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
@ -59,7 +64,7 @@ func expectNoError(err error, explain ...interface{}) {
// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
@ -117,10 +122,10 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
@ -131,20 +136,20 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
what = "expected"
want := pod.Name
if got != want {
e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response",
framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
@ -178,7 +183,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
}
created = append(created, pod)
}
e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
framework.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

if int32(len(created)) == replicas {
pods.Items = created
@ -262,17 +267,17 @@ func LogPodStates(pods []v1.Pod) {
maxGraceW++

// Log pod info. * does space padding, - makes them left-aligned.
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
e2elog.Logf("") // Final empty line helps for readability.
framework.Logf("") // Final empty line helps for readability.
}

// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
@ -281,12 +286,12 @@ func logPodTerminationMessages(pods []v1.Pod) {
for _, pod := range pods {
for _, status := range pod.Status.InitContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
framework.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
for _, status := range pod.Status.ContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
framework.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
}
@ -325,21 +330,21 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
}

logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
err = os.MkdirAll(logDir, 0755)
if err != nil {
e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
framework.Logf("Unable to create path '%s'. Err: %v", logDir, err)
continue
}

logPath := filepath.Join(logDir, "logs.txt")
err = os.WriteFile(logPath, []byte(logs), 0644)
if err != nil {
e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
framework.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
}
}
}
@ -349,7 +354,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
framework.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
@ -403,6 +408,23 @@ func NewAgnhostPod(ns, podName string, volumes []v1.Volume, mounts []v1.VolumeMo
return pod
}

func NewAgnhostPodFromContainers(ns, podName string, volumes []v1.Volume, containers ...v1.Container) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: containers[:],
Volumes: volumes,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
}
return pod
}

// NewAgnhostContainer returns the container Spec of an agnhost container.
func NewAgnhostContainer(containerName string, mounts []v1.VolumeMount, ports []v1.ContainerPort, args ...string) v1.Container {
if len(args) == 0 {
@ -438,7 +460,7 @@ func newExecPodSpec(ns, generateName string) *v1.Pod {
// CreateExecPodOrFail creates an agnhost pause pod used as a vessel for kubectl exec commands.
// Pod name is uniquely generated.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
e2elog.Logf("Creating new exec pod")
framework.Logf("Creating new exec pod")
pod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(pod)
@ -490,7 +512,7 @@ func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
success bool
podName string
@ -508,11 +530,11 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
for range podNames {
res := <-result
if !res.success {
e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
framework.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
success = false
}
}
e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
framework.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}

@ -595,12 +617,21 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
if err != nil {
e2elog.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout
}

// VerifyPodHasConditionWithType verifies the pod has the expected condition by type
func VerifyPodHasConditionWithType(f *framework.Framework, pod *v1.Pod, cType v1.PodConditionType) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", pod.Name)
if condition := FindPodConditionByType(&pod.Status, cType); condition == nil {
framework.Failf("pod %q should have the condition: %q, pod status: %v", pod.Name, cType, pod.Status)
}
}

func getNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {

10
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go
generated
vendored
10
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go
generated
vendored
@ -231,3 +231,13 @@ func mixinRestrictedContainerSecurityContext(container *v1.Container) {
}
}
}

// FindPodConditionByType loops through all pod conditions in pod status and returns the specified condition.
func FindPodConditionByType(podStatus *v1.PodStatus, conditionType v1.PodConditionType) *v1.PodCondition {
for _, cond := range podStatus.Conditions {
if cond.Type == conditionType {
return &cond
}
}
return nil
}

189
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
189
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go
generated
vendored
@ -21,6 +21,7 @@ import (
"context"
"errors"
"fmt"
"reflect"
"text/tabwriter"
"time"

@ -33,7 +34,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
)

@ -81,6 +82,39 @@ func TimeoutError(msg string, observedObjects ...interface{}) *timeoutError {
}
}

// FinalError constructs an error that indicates to a poll function that
// polling can be stopped immediately because some permanent error has been
// encountered that is not going to go away.
//
// TODO (@pohly): move this into framework once the refactoring from
// https://github.com/kubernetes/kubernetes/pull/112043 allows it. Right now it
// leads to circular dependencies.
func FinalError(err error) error {
return &FinalErr{Err: err}
}

type FinalErr struct {
Err error
}

func (err *FinalErr) Error() string {
if err.Err != nil {
return fmt.Sprintf("final error: %s", err.Err.Error())
}
return "final error, exact problem unknown"
}

func (err *FinalErr) Unwrap() error {
return err.Err
}

// IsFinal checks whether the error was marked as final by wrapping some error
// with FinalError.
func IsFinal(err error) bool {
var finalErr *FinalErr
return errors.As(err, &finalErr)
}

// maybeTimeoutError returns a TimeoutError if err is a timeout. Otherwise, wrap err.
// taskFormat and taskArgs should be the task being performed when the error occurred,
// e.g. "waiting for pod to be running".
@ -153,7 +187,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN

ignoreSelector := labels.SelectorFromSet(map[string]string{})
start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
var ignoreNotReady bool
badPods := []v1.Pod{}
@ -208,25 +242,25 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
case res && err == nil:
nOk++
case pod.Status.Phase == v1.PodSucceeded:
e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
framework.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
// it doesn't make sense to wait for this pod
continue
case pod.Status.Phase != v1.PodFailed:
e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
framework.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if metav1.GetControllerOf(&pod) == nil {
e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
framework.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by some controller
// ignore failed pods that are controlled by some controller
}
}

e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
framework.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
framework.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)

if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
@ -238,14 +272,16 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
if !ignoreNotReady {
return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
}
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
return nil
}

// WaitForPodCondition waits for a pod to be matched to the given condition.
// If the condition callback returns an error that matches FinalErr (checked with IsFinal),
// then polling aborts early.
func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
|
||||
framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
|
||||
var (
|
||||
lastPodError error
|
||||
lastPod *v1.Pod
|
||||
@ -260,16 +296,18 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
|
||||
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
|
||||
|
||||
// log now so that current pod info is reported before calling `condition()`
|
||||
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
||||
framework.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
|
||||
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
|
||||
if done, err := condition(pod); done {
|
||||
if err == nil {
|
||||
e2elog.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
|
||||
framework.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
|
||||
}
|
||||
return true, err
|
||||
} else if err != nil {
|
||||
// TODO(#109732): stop polling and return the error in this case.
|
||||
e2elog.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
|
||||
framework.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
|
||||
if IsFinal(err) {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
@ -289,10 +327,10 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
|
||||
return maybeTimeoutError(err, "waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc)
|
||||
}
|
||||
|
||||
// WaitForPodsCondition waits for the listed pods to match the given condition.
|
||||
// WaitForAllPodsCondition waits for the listed pods to match the given condition.
|
||||
// To succeed, at least minPods must be listed, and all listed pods must match the condition.
|
||||
func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) {
|
||||
e2elog.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
|
||||
framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
|
||||
var pods *v1.PodList
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
@ -301,7 +339,7 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
if len(pods.Items) < minPods {
|
||||
e2elog.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
|
||||
framework.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@ -319,12 +357,84 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
|
||||
if len(nonMatchingPods) <= 0 {
|
||||
return true, nil // All pods match.
|
||||
}
|
||||
e2elog.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
|
||||
framework.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
|
||||
return false, nil
|
||||
})
|
||||
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched)
|
||||
}
|
||||
|
||||
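A sketch (not part of this commit) of the renamed WaitForAllPodsCondition used with a label selector; the waitForLabeledPodsReady wrapper and the app=demo selector are hypothetical, while testutils.PodRunningReady is the same condition the framework itself passes below.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	testutils "k8s.io/kubernetes/test/utils"
)

// waitForLabeledPodsReady lists pods matching app=demo and requires that at
// least min of them are listed and that every listed pod is running and ready.
func waitForLabeledPodsReady(c clientset.Interface, ns string, min int, timeout time.Duration) error {
	opts := metav1.ListOptions{LabelSelector: "app=demo"}
	_, err := e2epod.WaitForAllPodsCondition(c, ns, opts, min, "running and ready", timeout, testutils.PodRunningReady)
	return err
}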
// WaitForPodsRunning waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` are running.
|
||||
func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
if ready, _ := testutils.PodRunningReady(&pod); ready {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods are running, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to be running (want %v, matched %d)", num, matched)
|
||||
}
|
||||
|
||||
// WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` stay in scheduling gated state.
|
||||
func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods in scheduling gated state, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to be scheduling gated (want %d, matched %d)", num, matched)
|
||||
}
|
||||
|
||||
// WaitForPodsWithSchedulingGates waits for a given `timeout` to evaluate if a certain amount of pods in given `ns`
|
||||
// match the given `schedulingGates` and stay in the scheduling gated state.
|
||||
func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
|
||||
matched := 0
|
||||
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return handleWaitingAPIError(err, true, "listing pods")
|
||||
}
|
||||
matched = 0
|
||||
for _, pod := range pods.Items {
|
||||
if reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates) {
|
||||
matched++
|
||||
}
|
||||
}
|
||||
if matched == num {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("expect %d pods carry the expected scheduling gates, but got %v", num, matched)
|
||||
return false, nil
|
||||
})
|
||||
return maybeTimeoutError(err, "waiting for pods to carry the expected scheduling gates (want %d, matched %d)", num, matched)
|
||||
}
|
||||
|
||||
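A sketch (not part of this commit) combining the two scheduling-gate helpers added above; the expectPodsGated wrapper, its package, and the example.com/hold gate name are hypothetical.

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectPodsGated checks that num pods in ns are reported as scheduling gated
// and that they carry exactly the expected gate list.
func expectPodsGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
	gates := []v1.PodSchedulingGate{{Name: "example.com/hold"}}
	if err := e2epod.WaitForPodsSchedulingGated(c, ns, num, timeout); err != nil {
		return err
	}
	return e2epod.WaitForPodsWithSchedulingGates(c, ns, num, timeout, gates)
}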
// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
|
||||
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
|
||||
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
|
||||
@ -344,6 +454,16 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForPodTerminatingInNamespaceTimeout returns if the pod is terminating, or an error if it is not after the timeout.
|
||||
func WaitForPodTerminatingInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "is terminating", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
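A sketch (not part of this commit) pairing a pod deletion with the new WaitForPodTerminatingInNamespaceTimeout helper; the deleteAndWaitTerminating wrapper and its package name are hypothetical.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// deleteAndWaitTerminating deletes the pod and then waits until it carries a
// deletion timestamp, i.e. is in the terminating state.
func deleteAndWaitTerminating(c clientset.Interface, ns, name string, timeout time.Duration) error {
	if err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil {
		return err
	}
	return e2epod.WaitForPodTerminatingInNamespaceTimeout(c, name, ns, timeout)
}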
// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
|
||||
func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
|
||||
@ -403,7 +523,9 @@ func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespa
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodRunning:
|
||||
return true, nil
|
||||
case v1.PodFailed, v1.PodSucceeded:
|
||||
case v1.PodFailed:
|
||||
return false, errPodFailed
|
||||
case v1.PodSucceeded:
|
||||
return false, errPodCompleted
|
||||
}
|
||||
return false, nil
|
||||
@ -441,14 +563,17 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
|
||||
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodFailed, v1.PodSucceeded:
|
||||
e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
case v1.PodFailed:
|
||||
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
return false, errPodFailed
|
||||
case v1.PodSucceeded:
|
||||
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
|
||||
return false, errPodCompleted
|
||||
case v1.PodRunning:
|
||||
e2elog.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
|
||||
framework.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
|
||||
return podutils.IsPodReady(pod), nil
|
||||
}
|
||||
e2elog.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
|
||||
framework.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
@ -509,7 +634,7 @@ func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, ti
|
||||
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
|
||||
var lastPod *v1.Pod
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
e2elog.Logf("Waiting for pod %s to disappear", podName)
|
||||
framework.Logf("Waiting for pod %s to disappear", podName)
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
|
||||
if err != nil {
|
||||
@ -518,14 +643,14 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
|
||||
found := false
|
||||
for i, pod := range pods.Items {
|
||||
if pod.Name == podName {
|
||||
e2elog.Logf("Pod %s still exists", podName)
|
||||
framework.Logf("Pod %s still exists", podName)
|
||||
found = true
|
||||
lastPod = &(pods.Items[i])
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
e2elog.Logf("Pod %s no longer exists", podName)
|
||||
framework.Logf("Pod %s no longer exists", podName)
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
@ -589,7 +714,7 @@ func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selecto
|
||||
// Return the list of matching pods.
|
||||
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
|
||||
opts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return WaitForAllPodsCondition(c, ns, opts, 1, "running and ready", podListTimeout, testutils.PodRunningReady)
|
||||
return WaitForAllPodsCondition(c, ns, opts, 1, "running and ready", timeout, testutils.PodRunningReady)
|
||||
}
|
||||
|
||||
// WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them,
|
||||
@ -602,7 +727,7 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
|
||||
pods = FilterNonRestartablePods(allPods)
|
||||
if len(pods) != expect {
|
||||
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
|
||||
e2elog.Logf("Error getting pods: %v", errLast)
|
||||
framework.Logf("Error getting pods: %v", errLast)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -688,21 +813,21 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe
|
||||
|
||||
// handleWaitingAPIErrror handles an error from an API request in the context of a Wait function.
|
||||
// If the error is retryable, sleep the recommended delay and ignore the error.
|
||||
// If the erorr is terminal, return it.
|
||||
// If the error is terminal, return it.
|
||||
func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
|
||||
taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
|
||||
if retryNotFound && apierrors.IsNotFound(err) {
|
||||
e2elog.Logf("Ignoring NotFound error while " + taskDescription)
|
||||
framework.Logf("Ignoring NotFound error while " + taskDescription)
|
||||
return false, nil
|
||||
}
|
||||
if retry, delay := shouldRetry(err); retry {
|
||||
e2elog.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
|
||||
framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
|
||||
if delay > 0 {
|
||||
time.Sleep(delay)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
e2elog.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
|
||||
framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
|
87
vendor/k8s.io/kubernetes/test/e2e/framework/skipper/skipper.go
generated
vendored
@ -17,14 +17,8 @@ limitations under the License.
|
||||
package skipper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
|
||||
@ -44,87 +38,14 @@ import (
|
||||
|
||||
func skipInternalf(caller int, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
// Long term this should get replaced with https://github.com/onsi/ginkgo/issues/1069.
|
||||
framework.Logf(msg)
|
||||
skip(msg, caller+1)
|
||||
}
|
||||
|
||||
// SkipPanic is the value that will be panicked from Skip.
|
||||
type SkipPanic struct {
|
||||
Message string // The failure message passed to Fail
|
||||
Filename string // The filename that is the source of the failure
|
||||
Line int // The line number of the filename that is the source of the failure
|
||||
FullStackTrace string // A full stack trace starting at the source of the failure
|
||||
}
|
||||
|
||||
const ginkgoSkipPanic = `
|
||||
Your test was skipped.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
defer GinkgoRecover()
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
|
||||
// String makes SkipPanic look like the old Ginkgo panic when printed.
|
||||
func (SkipPanic) String() string { return ginkgoSkipPanic }
|
||||
|
||||
// Skip wraps ginkgo.Skip so that it panics with more useful
|
||||
// information about why the test is being skipped. This function will
|
||||
// panic with a SkipPanic.
|
||||
func skip(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip += callerSkip[0]
|
||||
}
|
||||
|
||||
_, file, line, _ := runtime.Caller(skip)
|
||||
sp := SkipPanic{
|
||||
Message: message,
|
||||
Filename: file,
|
||||
Line: line,
|
||||
FullStackTrace: pruneStack(skip),
|
||||
}
|
||||
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
panic(sp)
|
||||
}
|
||||
}()
|
||||
|
||||
ginkgo.Skip(message, skip)
|
||||
}
|
||||
|
||||
// ginkgo adds a lot of test running infrastructure to the stack, so
|
||||
// we filter those out
|
||||
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)
|
||||
|
||||
func pruneStack(skip int) string {
|
||||
skip += 2 // one for pruneStack and one for debug.Stack
|
||||
stack := debug.Stack()
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
|
||||
var prunedStack []string
|
||||
|
||||
// skip the top of the stack
|
||||
for i := 0; i < 2*skip+1; i++ {
|
||||
scanner.Scan()
|
||||
}
|
||||
|
||||
for scanner.Scan() {
|
||||
if stackSkipPattern.Match(scanner.Bytes()) {
|
||||
scanner.Scan() // these come in pairs
|
||||
} else {
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
scanner.Scan() // these come in pairs
|
||||
prunedStack = append(prunedStack, scanner.Text())
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(prunedStack, "\n")
|
||||
ginkgo.Skip(msg, caller+1)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Skipf skips with information about why the test is being skipped.
|
||||
// The direct caller is recorded in the callstack.
|
||||
func Skipf(format string, args ...interface{}) {
|
||||
skipInternalf(1, format, args...)
|
||||
panic("unreachable")
|
||||
|
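A sketch (not part of this commit) of calling the skipper package, which after this change delegates straight to ginkgo.Skip; the requireNodes helper and its node-count check are hypothetical.

package example

import (
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

// requireNodes skips the current spec when fewer schedulable nodes are
// available than the test needs.
func requireNodes(have, want int) {
	if have < want {
		e2eskipper.Skipf("test requires %d schedulable nodes, only %d are available", want, have)
	}
}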
24
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/ssh.go
generated
vendored
@ -35,7 +35,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -125,7 +125,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
|
||||
hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
|
||||
// If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs.
|
||||
if len(hosts) < len(nodelist.Items) {
|
||||
e2elog.Logf("No external IP address on nodes, falling back to internal IPs")
|
||||
framework.Logf("No external IP address on nodes, falling back to internal IPs")
|
||||
hosts = nodeAddresses(nodelist, v1.NodeInternalIP)
|
||||
}
|
||||
|
||||
@ -146,12 +146,12 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
|
||||
go func(host string) {
|
||||
defer wg.Done()
|
||||
if canConnect(host) {
|
||||
e2elog.Logf("Assuming SSH on host %s", host)
|
||||
framework.Logf("Assuming SSH on host %s", host)
|
||||
sshHostsLock.Lock()
|
||||
sshHosts = append(sshHosts, net.JoinHostPort(host, SSHPort))
|
||||
sshHostsLock.Unlock()
|
||||
} else {
|
||||
e2elog.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
|
||||
framework.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
|
||||
}
|
||||
}(host)
|
||||
}
|
||||
@ -168,7 +168,7 @@ func canConnect(host string) bool {
|
||||
hostPort := net.JoinHostPort(host, SSHPort)
|
||||
conn, err := net.DialTimeout("tcp", hostPort, 3*time.Second)
|
||||
if err != nil {
|
||||
e2elog.Logf("cannot dial %s: %v", hostPort, err)
|
||||
framework.Logf("cannot dial %s: %v", hostPort, err)
|
||||
return false
|
||||
}
|
||||
conn.Close()
|
||||
@ -352,15 +352,15 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
|
||||
// LogResult records result log
|
||||
func LogResult(result Result) {
|
||||
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
|
||||
e2elog.Logf("ssh %s: command: %s", remote, result.Cmd)
|
||||
e2elog.Logf("ssh %s: stdout: %q", remote, result.Stdout)
|
||||
e2elog.Logf("ssh %s: stderr: %q", remote, result.Stderr)
|
||||
e2elog.Logf("ssh %s: exit code: %d", remote, result.Code)
|
||||
framework.Logf("ssh %s: command: %s", remote, result.Cmd)
|
||||
framework.Logf("ssh %s: stdout: %q", remote, result.Stdout)
|
||||
framework.Logf("ssh %s: stderr: %q", remote, result.Stderr)
|
||||
framework.Logf("ssh %s: exit code: %d", remote, result.Code)
|
||||
}
|
||||
|
||||
// IssueSSHCommandWithResult tries to execute a SSH command and returns the execution result
|
||||
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
|
||||
e2elog.Logf("Getting external IP address for %s", node.Name)
|
||||
framework.Logf("Getting external IP address for %s", node.Name)
|
||||
host := ""
|
||||
for _, a := range node.Status.Addresses {
|
||||
if a.Type == v1.NodeExternalIP && a.Address != "" {
|
||||
@ -383,7 +383,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
|
||||
return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
|
||||
}
|
||||
|
||||
e2elog.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
|
||||
framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
|
||||
result, err := SSH(cmd, host, provider)
|
||||
LogResult(result)
|
||||
|
||||
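A sketch (not part of this commit) of the e2e SSH helpers whose logging moved to framework.Logf; the uptimeOnNode wrapper, its package name, and the "gce" provider string are hypothetical.

package example

import (
	v1 "k8s.io/api/core/v1"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// uptimeOnNode runs `uptime` on the node over SSH, logs the full result via
// LogResult, and returns stdout.
func uptimeOnNode(node *v1.Node) (string, error) {
	res, err := e2essh.IssueSSHCommandWithResult("uptime", "gce", node)
	if err != nil {
		return "", err
	}
	e2essh.LogResult(*res)
	return res.Stdout, nil
}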
@ -454,7 +454,7 @@ func expectNoError(err error, explain ...interface{}) {
|
||||
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
|
||||
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
|
||||
if err != nil {
|
||||
e2elog.Logf("Unexpected error occurred: %v", err)
|
||||
framework.Logf("Unexpected error occurred: %v", err)
|
||||
}
|
||||
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
|
||||
}
|
||||
|
72
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -32,14 +32,15 @@ import (
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/ginkgo/v2/reporters"
|
||||
"github.com/onsi/ginkgo/v2/types"
|
||||
gomegaformat "github.com/onsi/gomega/format"
|
||||
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
"k8s.io/kubernetes/test/utils/kubeconfig"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -197,6 +198,9 @@ type TestContextType struct {
|
||||
|
||||
// NodeKillerConfig describes configuration of NodeKiller -- a utility to
|
||||
// simulate node failures.
|
||||
//
|
||||
// TODO: move this and the corresponding command line flags into
|
||||
// test/e2e/framework/node.
|
||||
type NodeKillerConfig struct {
|
||||
// Enabled determines whether NodeKill should do anything at all.
|
||||
// All other options below are ignored if Enabled = false.
|
||||
@ -256,7 +260,7 @@ type CloudConfig struct {
|
||||
ClusterIPRange string
|
||||
ClusterTag string
|
||||
Network string
|
||||
ConfigFile string // for azure and openstack
|
||||
ConfigFile string // for azure
|
||||
NodeTag string
|
||||
MasterTag string
|
||||
|
||||
@ -304,6 +308,9 @@ func (tc TestContextType) ClusterIsIPv6() bool {
|
||||
// options themselves, copy flags from test/e2e/framework/config
|
||||
// as shown in HandleFlags.
|
||||
func RegisterCommonFlags(flags *flag.FlagSet) {
|
||||
// The default is too low for objects like pods, even when using YAML. We double the default.
|
||||
flags.IntVar(&gomegaformat.MaxLength, "gomega-max-length", 8000, "Sets the maximum size for the gomega formatter (= gomega.MaxLength). Use 0 to disable truncation.")
|
||||
|
||||
flags.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of system all add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' of 'false' monitoring will be turned off.")
|
||||
flags.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring logs sizes on all machines running e2e tests.")
|
||||
flags.IntVar(&TestContext.MaxNodesToGather, "max-nodes-to-gather-from", 20, "The maximum number of nodes to gather extended info from on test failure.")
|
||||
@ -424,44 +431,6 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
|
||||
flags.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation")
|
||||
}
|
||||
|
||||
func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
|
||||
clusterNick := "cluster"
|
||||
userNick := "user"
|
||||
contextNick := "context"
|
||||
|
||||
configCmd := clientcmdapi.NewConfig()
|
||||
|
||||
credentials := clientcmdapi.NewAuthInfo()
|
||||
credentials.Token = clientCfg.BearerToken
|
||||
credentials.TokenFile = clientCfg.BearerTokenFile
|
||||
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
|
||||
if len(credentials.ClientCertificate) == 0 {
|
||||
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
|
||||
}
|
||||
credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
|
||||
if len(credentials.ClientKey) == 0 {
|
||||
credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
|
||||
}
|
||||
configCmd.AuthInfos[userNick] = credentials
|
||||
|
||||
cluster := clientcmdapi.NewCluster()
|
||||
cluster.Server = clientCfg.Host
|
||||
cluster.CertificateAuthority = clientCfg.CAFile
|
||||
if len(cluster.CertificateAuthority) == 0 {
|
||||
cluster.CertificateAuthorityData = clientCfg.CAData
|
||||
}
|
||||
cluster.InsecureSkipTLSVerify = clientCfg.Insecure
|
||||
configCmd.Clusters[clusterNick] = cluster
|
||||
|
||||
context := clientcmdapi.NewContext()
|
||||
context.Cluster = clusterNick
|
||||
context.AuthInfo = userNick
|
||||
configCmd.Contexts[contextNick] = context
|
||||
configCmd.CurrentContext = contextNick
|
||||
|
||||
return configCmd
|
||||
}
|
||||
|
||||
// GenerateSecureToken returns a string of length tokenLen, consisting
|
||||
// of random bytes encoded as base64 for use as a Bearer Token during
|
||||
// communication with an APIServer
|
||||
@ -491,6 +460,7 @@ func AfterReadingAllFlags(t *TestContextType) {
|
||||
fs.Set("logtostderr", "false")
|
||||
fs.Set("alsologtostderr", "false")
|
||||
fs.Set("one_output", "true")
|
||||
fs.Set("stderrthreshold", "10" /* higher than any of the severities -> none pass the threshold */)
|
||||
klog.SetOutput(ginkgo.GinkgoWriter)
|
||||
|
||||
// Only set a default host if one won't be supplied via kubeconfig
|
||||
@ -498,7 +468,7 @@ func AfterReadingAllFlags(t *TestContextType) {
|
||||
// Check if we can use the in-cluster config
|
||||
if clusterConfig, err := restclient.InClusterConfig(); err == nil {
|
||||
if tempFile, err := os.CreateTemp(os.TempDir(), "kubeconfig-"); err == nil {
|
||||
kubeConfig := createKubeConfig(clusterConfig)
|
||||
kubeConfig := kubeconfig.CreateKubeConfig(clusterConfig)
|
||||
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
|
||||
t.KubeConfig = tempFile.Name()
|
||||
klog.V(4).Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
|
||||
@ -560,6 +530,14 @@ func AfterReadingAllFlags(t *TestContextType) {
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// This is the traditional gomega.Format default of 4000 for an object
|
||||
// dump plus some extra room for the message.
|
||||
maxFailureMessageSize = 5000
|
||||
|
||||
truncatedMsg = "\n[... see output for full dump ...]\n"
|
||||
)
|
||||
|
||||
// writeJUnitReport generates a JUnit file in the e2e report directory that is
|
||||
// shorter than the one normally written by `ginkgo --junit-report`. This is
|
||||
// needed because the full report can become too large for tools like Spyglass
|
||||
@ -576,6 +554,18 @@ func writeJUnitReport(report ginkgo.Report) {
|
||||
if specReport.State != types.SpecStateFailed {
|
||||
specReport.CapturedGinkgoWriterOutput = ""
|
||||
specReport.CapturedStdOutErr = ""
|
||||
} else {
|
||||
// Truncate the failure message if it is too large.
|
||||
msgLen := len(specReport.Failure.Message)
|
||||
if msgLen > maxFailureMessageSize {
|
||||
// Insert full message at the beginning where it is easy to find.
|
||||
specReport.CapturedGinkgoWriterOutput =
|
||||
"Full failure message:\n" +
|
||||
specReport.Failure.Message + "\n\n" +
|
||||
strings.Repeat("=", 70) + "\n\n" +
|
||||
specReport.CapturedGinkgoWriterOutput
|
||||
specReport.Failure.Message = specReport.Failure.Message[0:maxFailureMessageSize/2] + truncatedMsg + specReport.Failure.Message[msgLen-maxFailureMessageSize/2:msgLen]
|
||||
}
|
||||
}
|
||||
|
||||
// Remove report entries generated by ginkgo.By("doing
|
||||
|
692
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@ -23,27 +23,22 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
"github.com/onsi/gomega"
|
||||
gomegatypes "github.com/onsi/gomega/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
@ -57,28 +52,13 @@ import (
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
uexec "k8s.io/utils/exec"
|
||||
netutils "k8s.io/utils/net"
|
||||
|
||||
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
|
||||
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
|
||||
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimal number of nodes for the cluster to be considered large.
|
||||
largeClusterThreshold = 100
|
||||
|
||||
// TODO(justinsb): Avoid hardcoding this.
|
||||
awsMasterIP = "172.20.0.9"
|
||||
|
||||
// AllContainers specifies that all containers be visited
|
||||
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
|
||||
AllContainers = InitContainers | Containers | EphemeralContainers
|
||||
)
|
||||
|
||||
// DEPRECATED constants. Use the timeouts in framework.Framework instead.
|
||||
@ -328,7 +308,10 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return err
|
||||
if err != nil {
|
||||
return fmt.Errorf("wait for service account %q in namespace %q: %w", serviceAccountName, ns, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
|
||||
@ -540,199 +523,6 @@ func RandomSuffix() string {
|
||||
return strconv.Itoa(rand.Intn(10000))
|
||||
}
|
||||
|
||||
// LookForStringInPodExec looks for the given string in the output of a command
|
||||
// executed in the first container of specified pod.
|
||||
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
|
||||
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
|
||||
return LookForStringInPodExecToContainer(ns, podName, "", command, expectedString, timeout)
|
||||
}
|
||||
|
||||
// LookForStringInPodExecToContainer looks for the given string in the output of a
|
||||
// command executed in specified pod container, or first container if not specified.
|
||||
func LookForStringInPodExecToContainer(ns, podName, containerName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
|
||||
return lookForString(expectedString, timeout, func() string {
|
||||
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns)}
|
||||
if len(containerName) > 0 {
|
||||
args = append(args, fmt.Sprintf("--container=%s", containerName))
|
||||
}
|
||||
args = append(args, "--")
|
||||
args = append(args, command...)
|
||||
return RunKubectlOrDie(ns, args...)
|
||||
})
|
||||
}
|
||||
|
||||
// lookForString looks for the given string in the output of fn, repeatedly calling fn until
|
||||
// the timeout is reached or the string is found. Returns last log and possibly
|
||||
// error if the string was not found.
|
||||
// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored.
|
||||
func lookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
|
||||
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
|
||||
result = fn()
|
||||
if strings.Contains(result, expectedString) {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
|
||||
return
|
||||
}
|
||||
|
||||
// KubectlBuilder is used to build, customize and execute a kubectl Command.
|
||||
// Add more functions to customize the builder as needed.
|
||||
type KubectlBuilder struct {
|
||||
cmd *exec.Cmd
|
||||
timeout <-chan time.Time
|
||||
}
|
||||
|
||||
// NewKubectlCommand returns a KubectlBuilder for running kubectl.
|
||||
func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
|
||||
b := new(KubectlBuilder)
|
||||
tk := e2ekubectl.NewTestKubeconfig(TestContext.CertDir, TestContext.Host, TestContext.KubeConfig, TestContext.KubeContext, TestContext.KubectlPath, namespace)
|
||||
b.cmd = tk.KubectlCmd(args...)
|
||||
return b
|
||||
}
|
||||
|
||||
// WithEnv sets the given environment and returns itself.
|
||||
func (b *KubectlBuilder) WithEnv(env []string) *KubectlBuilder {
|
||||
b.cmd.Env = env
|
||||
return b
|
||||
}
|
||||
|
||||
// WithTimeout sets the given timeout and returns itself.
|
||||
func (b *KubectlBuilder) WithTimeout(t <-chan time.Time) *KubectlBuilder {
|
||||
b.timeout = t
|
||||
return b
|
||||
}
|
||||
|
||||
// WithStdinData sets the given data to stdin and returns itself.
|
||||
func (b KubectlBuilder) WithStdinData(data string) *KubectlBuilder {
|
||||
b.cmd.Stdin = strings.NewReader(data)
|
||||
return &b
|
||||
}
|
||||
|
||||
// WithStdinReader sets the given reader and returns itself.
|
||||
func (b KubectlBuilder) WithStdinReader(reader io.Reader) *KubectlBuilder {
|
||||
b.cmd.Stdin = reader
|
||||
return &b
|
||||
}
|
||||
|
||||
// ExecOrDie runs the kubectl executable or dies if error occurs.
|
||||
func (b KubectlBuilder) ExecOrDie(namespace string) string {
|
||||
str, err := b.Exec()
|
||||
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
|
||||
// Note that we're still dying after retrying so that we can get visibility to triage it further.
|
||||
if isTimeout(err) {
|
||||
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
|
||||
time.Sleep(2 * time.Second)
|
||||
retryStr, retryErr := RunKubectl(namespace, "version")
|
||||
Logf("stdout: %q", retryStr)
|
||||
Logf("err: %v", retryErr)
|
||||
}
|
||||
ExpectNoError(err)
|
||||
return str
|
||||
}
|
||||
|
||||
func isTimeout(err error) bool {
|
||||
switch err := err.(type) {
|
||||
case *url.Error:
|
||||
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
|
||||
return true
|
||||
}
|
||||
case net.Error:
|
||||
if err.Timeout() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Exec runs the kubectl executable.
|
||||
func (b KubectlBuilder) Exec() (string, error) {
|
||||
stdout, _, err := b.ExecWithFullOutput()
|
||||
return stdout, err
|
||||
}
|
||||
|
||||
// ExecWithFullOutput runs the kubectl executable, and returns the stdout and stderr.
|
||||
func (b KubectlBuilder) ExecWithFullOutput() (string, string, error) {
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd := b.cmd
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
|
||||
if err := cmd.Start(); err != nil {
|
||||
return "", "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err)
|
||||
}
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- cmd.Wait()
|
||||
}()
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
var rc = 127
|
||||
if ee, ok := err.(*exec.ExitError); ok {
|
||||
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
|
||||
Logf("rc: %d", rc)
|
||||
}
|
||||
return stdout.String(), stderr.String(), uexec.CodeExitError{
|
||||
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v", cmd, cmd.Stdout, cmd.Stderr, err),
|
||||
Code: rc,
|
||||
}
|
||||
}
|
||||
case <-b.timeout:
|
||||
b.cmd.Process.Kill()
|
||||
return "", "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v", cmd, cmd.Stdout, cmd.Stderr)
|
||||
}
|
||||
Logf("stderr: %q", stderr.String())
|
||||
Logf("stdout: %q", stdout.String())
|
||||
return stdout.String(), stderr.String(), nil
|
||||
}
|
||||
|
||||
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
|
||||
func RunKubectlOrDie(namespace string, args ...string) string {
|
||||
return NewKubectlCommand(namespace, args...).ExecOrDie(namespace)
|
||||
}
|
||||
|
||||
// RunKubectl is a convenience wrapper over kubectlBuilder
|
||||
func RunKubectl(namespace string, args ...string) (string, error) {
|
||||
return NewKubectlCommand(namespace, args...).Exec()
|
||||
}
|
||||
|
||||
// RunKubectlWithFullOutput is a convenience wrapper over kubectlBuilder
|
||||
// It will also return the command's stderr.
|
||||
func RunKubectlWithFullOutput(namespace string, args ...string) (string, string, error) {
|
||||
return NewKubectlCommand(namespace, args...).ExecWithFullOutput()
|
||||
}
|
||||
|
||||
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
|
||||
func RunKubectlOrDieInput(namespace string, data string, args ...string) string {
|
||||
return NewKubectlCommand(namespace, args...).WithStdinData(data).ExecOrDie(namespace)
|
||||
}
|
||||
|
||||
// RunKubectlInput is a convenience wrapper over kubectlBuilder that takes input to stdin
|
||||
func RunKubectlInput(namespace string, data string, args ...string) (string, error) {
|
||||
return NewKubectlCommand(namespace, args...).WithStdinData(data).Exec()
|
||||
}
|
||||
|
||||
// RunKubemciWithKubeconfig is a convenience wrapper over RunKubemciCmd
|
||||
func RunKubemciWithKubeconfig(args ...string) (string, error) {
|
||||
if TestContext.KubeConfig != "" {
|
||||
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
|
||||
}
|
||||
return RunKubemciCmd(args...)
|
||||
}
|
||||
|
||||
// RunKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
|
||||
// It assumes that kubemci exists in PATH.
|
||||
func RunKubemciCmd(args ...string) (string, error) {
|
||||
// kubemci is assumed to be in PATH.
|
||||
kubemci := "kubemci"
|
||||
b := new(KubectlBuilder)
|
||||
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)
|
||||
|
||||
b.cmd = exec.Command(kubemci, args...)
|
||||
return b.Exec()
|
||||
}
|
||||
|
||||
// StartCmdAndStreamOutput returns stdout and stderr after starting the given cmd.
|
||||
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
|
||||
stdout, err = cmd.StdoutPipe()
|
||||
@ -755,449 +545,6 @@ func TryKill(cmd *exec.Cmd) {
|
||||
}
|
||||
}
|
||||
|
||||
// testContainerOutputMatcher runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using the given matcher.
|
||||
func (f *Framework) testContainerOutputMatcher(scenarioName string,
|
||||
pod *v1.Pod,
|
||||
containerIndex int,
|
||||
expectedOutput []string,
|
||||
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
|
||||
ginkgo.By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
|
||||
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
|
||||
Failf("Invalid container index: %d", containerIndex)
|
||||
}
|
||||
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
|
||||
}
|
||||
|
||||
// ContainerType signifies container type
|
||||
type ContainerType int
|
||||
|
||||
const (
|
||||
// Containers is for normal containers
|
||||
Containers ContainerType = 1 << iota
|
||||
// InitContainers is for init containers
|
||||
InitContainers
|
||||
// EphemeralContainers is for ephemeral containers
|
||||
EphemeralContainers
|
||||
)
|
||||
|
||||
// allFeatureEnabledContainers returns a ContainerType mask which includes all container
|
||||
// types except for the ones guarded by feature gate.
|
||||
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
|
||||
func allFeatureEnabledContainers() ContainerType {
|
||||
return AllContainers
|
||||
}
|
||||
|
||||
// ContainerVisitor is called with each container spec, and returns true
|
||||
// if visiting should continue.
|
||||
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
|
||||
type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool)
|
||||
|
||||
// visitContainers invokes the visitor function with a pointer to every container
|
||||
// spec in the given pod spec with type set in mask. If visitor returns false,
|
||||
// visiting is short-circuited. visitContainers returns true if visiting completes,
|
||||
// false if visiting was short-circuited.
|
||||
// Copied from pkg/api/v1/pod to avoid pulling extra dependencies
|
||||
func visitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool {
|
||||
if mask&InitContainers != 0 {
|
||||
for i := range podSpec.InitContainers {
|
||||
if !visitor(&podSpec.InitContainers[i], InitContainers) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
if mask&Containers != 0 {
|
||||
for i := range podSpec.Containers {
|
||||
if !visitor(&podSpec.Containers[i], Containers) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
if mask&EphemeralContainers != 0 {
|
||||
for i := range podSpec.EphemeralContainers {
|
||||
if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// MatchContainerOutput creates a pod and waits for all it's containers to exit with success.
|
||||
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
|
||||
func (f *Framework) MatchContainerOutput(
|
||||
pod *v1.Pod,
|
||||
containerName string,
|
||||
expectedOutput []string,
|
||||
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
|
||||
ns := pod.ObjectMeta.Namespace
|
||||
if ns == "" {
|
||||
ns = f.Namespace.Name
|
||||
}
|
||||
podClient := f.PodClientNS(ns)
|
||||
|
||||
createdPod := podClient.Create(pod)
|
||||
defer func() {
|
||||
ginkgo.By("delete the pod")
|
||||
podClient.DeleteSync(createdPod.Name, metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
|
||||
}()
|
||||
|
||||
// Wait for client pod to complete.
|
||||
podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
|
||||
|
||||
// Grab its logs. Get host first.
|
||||
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get pod status: %v", err)
|
||||
}
|
||||
|
||||
if podErr != nil {
|
||||
// Pod failed. Dump all logs from all containers to see what's wrong
|
||||
_ = visitContainers(&podStatus.Spec, allFeatureEnabledContainers(), func(c *v1.Container, containerType ContainerType) bool {
|
||||
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, c.Name)
|
||||
if err != nil {
|
||||
Logf("Failed to get logs from node %q pod %q container %q: %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, c.Name, err)
|
||||
} else {
|
||||
Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, c.Name, logs)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
|
||||
}
|
||||
|
||||
Logf("Trying to get logs from node %s pod %s container %s: %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
|
||||
|
||||
// Sometimes the actual containers take a second to get started, try to get logs for 60s
|
||||
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
|
||||
if err != nil {
|
||||
Logf("Failed to get logs from node %q pod %q container %q. %v",
|
||||
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
|
||||
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
|
||||
}
|
||||
|
||||
for _, expected := range expectedOutput {
|
||||
m := matcher(expected)
|
||||
matches, err := m.Match(logs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("expected %q in container output: %v", expected, err)
|
||||
} else if !matches {
|
||||
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EventsLister is a func that lists events.
|
||||
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
|
||||
|
||||
// dumpEventsInNamespace dumps events in the given namespace.
|
||||
func dumpEventsInNamespace(eventsLister EventsLister, namespace string) {
|
||||
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
|
||||
events, err := eventsLister(metav1.ListOptions{}, namespace)
|
||||
ExpectNoError(err, "failed to list events in namespace %q", namespace)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Found %d events.", len(events.Items)))
|
||||
// Sort events by their first timestamp
|
||||
sortedEvents := events.Items
|
||||
if len(sortedEvents) > 1 {
|
||||
sort.Sort(byFirstTimestamp(sortedEvents))
|
||||
}
|
||||
for _, e := range sortedEvents {
|
||||
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
|
||||
}
|
||||
// Note that we don't wait for any Cleanup to propagate, which means
|
||||
// that if you delete a bunch of pods right before ending your test,
|
||||
// you may or may not see the killing/deletion/Cleanup events.
|
||||
}
|
||||
|
||||
// DumpAllNamespaceInfo dumps events, pods and nodes information in the given namespace.
|
||||
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
|
||||
dumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
|
||||
return c.CoreV1().Events(ns).List(context.TODO(), opts)
|
||||
}, namespace)
|
||||
|
||||
e2epod.DumpAllPodInfoForNamespace(c, namespace, TestContext.ReportDir)
|
||||
|
||||
// If cluster is large, then the following logs are basically useless, because:
|
||||
// 1. it takes tens of minutes or hours to grab all of them
|
||||
// 2. there are so many of them that working with them are mostly impossible
|
||||
// So we dump them only if the cluster is relatively small.
|
||||
maxNodesForDump := TestContext.MaxNodesToGather
|
||||
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("unable to fetch node list: %v", err)
|
||||
return
|
||||
}
|
||||
if len(nodes.Items) <= maxNodesForDump {
|
||||
dumpAllNodeInfo(c, nodes)
|
||||
} else {
|
||||
Logf("skipping dumping cluster info - cluster too large")
|
||||
}
|
||||
}
|
||||
|
||||
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
|
||||
type byFirstTimestamp []v1.Event
|
||||
|
||||
func (o byFirstTimestamp) Len() int { return len(o) }
|
||||
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o byFirstTimestamp) Less(i, j int) bool {
|
||||
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
|
||||
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
|
||||
}
|
||||
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
|
||||
}
|
||||
|
||||
func dumpAllNodeInfo(c clientset.Interface, nodes *v1.NodeList) {
|
||||
names := make([]string, len(nodes.Items))
|
||||
for ix := range nodes.Items {
|
||||
names[ix] = nodes.Items[ix].Name
|
||||
}
|
||||
DumpNodeDebugInfo(c, names, Logf)
|
||||
}
|
||||
|
||||
// DumpNodeDebugInfo dumps debug information of the given nodes.
|
||||
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
|
||||
for _, n := range nodeNames {
|
||||
logFunc("\nLogging node info for node %v", n)
|
||||
node, err := c.CoreV1().Nodes().Get(context.TODO(), n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)

logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := getKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
e2emetrics.HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}

// getKubeletPods retrieves the list of pods on the kubelet.
func getKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
var client restclient.Result
finished := make(chan struct{}, 1)
go func() {
// call chain tends to hang in some cases when Node is not ready. Add an artificial timeout for this call. #22165
client = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, KubeletPort)).
Suffix("pods").
Do(context.TODO())

finished <- struct{}{}
}()
select {
case <-finished:
result := &v1.PodList{}
if err := client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
case <-time.After(PodGetTimeout):
return &v1.PodList{}, fmt.Errorf("Waiting up to %v for getting the list of pods", PodGetTimeout)
}
}

// logNodeEvents logs kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(context.TODO(), options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}

// WaitForAllNodesSchedulable waits up to timeout for all
// (but TestContext.AllowedNotReadyNodes) to become schedulable.
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}

Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
return wait.PollImmediate(
30*time.Second,
timeout,
e2enode.CheckReadyForTests(c, TestContext.NonblockingTaints, TestContext.AllowedNotReadyNodes, largeClusterThreshold),
)
}

// AddOrUpdateLabelOnNode adds the given label key and value to the given node or updates value.
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}

// ExpectNodeHasLabel expects that the given node has the given label pair.
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
ExpectNoError(err)
ExpectEqual(node.Labels[labelKey], labelValue)
}

// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
ginkgo.By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))

ginkgo.By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}

// ExpectNodeHasTaint expects that the node has the given taint.
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
ginkgo.By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}

// NodeHasTaint returns true if the node has the given taint, else returns false.
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}

nodeTaints := node.Spec.Taints

if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}

// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdWithFullOutput runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell. It will also return the command's stderr.
func RunHostCmdWithFullOutput(ns, name, cmd string) (string, string, error) {
return RunKubectlWithFullOutput(ns, "exec", name, "--", "/bin/sh", "-x", "-c", cmd)
}

// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}

// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}

// AllNodesReady checks whether all registered nodes are ready. Setting -1 on
// TestContext.AllowedNotReadyNodes will bypass the post test node readiness check.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
if TestContext.AllowedNotReadyNodes == -1 {
return nil
}

Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)

var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !e2enode.IsConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
})

if err != nil && err != wait.ErrWaitTimeout {
return err
}

if len(notReady) > TestContext.AllowedNotReadyNodes {
msg := ""
for _, node := range notReady {
msg = fmt.Sprintf("%s, %s", msg, node.Name)
}
return fmt.Errorf("Not ready nodes: %#v", msg)
}
return nil
}

// LookForStringInLog looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return lookForString(expectedString, timeout, func() string {
return RunKubectlOrDie(ns, "logs", podName, container)
})
}

// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
@ -1320,25 +667,6 @@ func GetControlPlaneAddresses(c clientset.Interface) []string {
return ips.List()
}

// CreateEmptyFileOnPod creates empty file at given path on the pod.
// TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl(namespace, "exec", podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}

// DumpDebugInfo dumps debug info of tests.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl(ns, "describe", "po", s.Name)
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)

l, _ := RunKubectl(ns, "logs", s.Name, "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}

// PrettyPrintJSON converts metrics to JSON format.
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
@ -1354,20 +682,10 @@ func PrettyPrintJSON(metrics interface{}) string {
return formatted.String()
}

// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}

// WatchEventSequenceVerifier ...
// manages a watch for a given resource, ensures that events take place in a given order, retries the test on failure
//
// testContext cancelation signal across API boundries, e.g: context.TODO()
// testContext cancellation signal across API boundaries, e.g: context.TODO()
// dc sets up a client to the API
// resourceType specify the type of resource
// namespace select a namespace
84
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
84
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go
generated
vendored
@ -31,8 +31,8 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* 2) With server outside of Kubernetes
* Appropriate server must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/
@ -56,7 +56,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"

@ -166,65 +168,6 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
return config, pod, host
}

// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer. Also creates the gluster endpoints object.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestConfig, pod *v1.Pod, ip string) {
config = TestConfig{
Namespace: namespace,
Prefix: "gluster",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
ServerPorts: []int{24007, 24008, 49152},
}
pod, ip = CreateStorageServer(cs, config)

service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Protocol: v1.ProtocolTCP,
Port: 24007,
},
},
},
}

_, err := cs.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create service for Gluster server")

ginkgo.By("creating Gluster endpoints")
endpoints := &v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: ip,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
_, err = cs.CoreV1().Endpoints(namespace).Create(context.TODO(), endpoints, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create endpoints for Gluster server")

return config, pod, ip
}

// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
@ -413,7 +356,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.ServerReadyMessage != "" {
_, err := framework.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
_, err := e2epodoutput.LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
@ -534,7 +477,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Block: check content
deviceName := fmt.Sprintf("/opt/%d", i)
commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

// Check that it's a real block device
@ -543,7 +486,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)

// Check that a directory has been mounted
@ -554,14 +497,14 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}

// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
@ -608,7 +551,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
err = f.PodClient().AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
@ -646,7 +589,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
}
out, err := framework.RunKubectl(injectorPod.Namespace, commands...)
out, err := e2ekubectl.RunKubectl(injectorPod.Namespace, commands...)
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
}

@ -658,7 +601,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
// generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
func generateWriteCmd(content, path string) []string {
var commands []string
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path}
commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path + "; sync"}
return commands
}

@ -707,7 +650,7 @@ func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persi
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}

// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
@ -716,7 +659,6 @@ func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {

if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
32
vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go
generated
vendored
32
vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go
generated
vendored
@ -275,6 +275,7 @@ var factories = map[What]ItemFactory{
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"CSIDriver"}: &csiDriverFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"ReplicaSet"}: &replicaSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
@ -315,7 +316,7 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
case *rbacv1.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
// All those names are excempt from renaming. That list could be populated by querying
// All those names are exempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
PatchName(f, &item.Name)
@ -382,6 +383,14 @@ func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace,
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *appsv1.ReplicaSet:
PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
return err
}
if err := patchContainerImages(item.Spec.Template.Spec.InitContainers); err != nil {
return err
}
case *apiextensionsv1.CustomResourceDefinition:
// Do nothing. Patching name to all CRDs won't always be the expected behavior.
default:
@ -584,6 +593,27 @@ func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i inte
}, nil
}

type replicaSetFactory struct{}

func (f *replicaSetFactory) New() runtime.Object {
return &appsv1.ReplicaSet{}
}

func (*replicaSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
item, ok := i.(*appsv1.ReplicaSet)
if !ok {
return nil, errorItemNotSupported
}

client := f.ClientSet.AppsV1().ReplicaSets(ns.Name)
if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("create ReplicaSet: %w", err)
}
return func() error {
return client.Delete(context.TODO(), item.GetName(), metav1.DeleteOptions{})
}, nil
}

type storageClassFactory struct{}

func (f *storageClassFactory) New() runtime.Object {
7
vendor/k8s.io/kubernetes/test/e2e/storage/utils/deployment.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/e2e/storage/utils/deployment.go
generated
vendored
@ -152,6 +152,9 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
if o.FSGroupPolicy != nil {
object.Spec.FSGroupPolicy = o.FSGroupPolicy
}
if o.SELinuxMount != nil {
object.Spec.SELinuxMount = o.SELinuxMount
}
}

return nil
@ -211,4 +214,8 @@ type PatchCSIOptions struct {
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
FSGroupPolicy *storagev1.FSGroupPolicy
// If not nil, the value to use for the CSIDriver.Spec.SELinuxMount
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
SELinuxMount *bool
}
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/host_exec.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/host_exec.go
generated
vendored
@ -149,7 +149,7 @@ func (h *hostExecutor) exec(cmd string, node *v1.Node) (Result, error) {
}
containerName := pod.Spec.Containers[0].Name
var err error
result.Stdout, result.Stderr, err = h.Framework.ExecWithOptions(framework.ExecOptions{
result.Stdout, result.Stderr, err = e2epod.ExecWithOptions(h.Framework, e2epod.ExecOptions{
Command: args,
Namespace: pod.Namespace,
PodName: pod.Name,
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/storage/utils/local.go
generated
vendored
@ -26,7 +26,7 @@ import (
"strings"

"github.com/onsi/ginkgo/v2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
5
vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e/storage/utils/pod.go
generated
vendored
@ -152,7 +152,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
if !isPidChanged {
framework.Fail("Kubelet PID remained unchanged after restarting Kubelet")
}

framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
4
vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/storage/utils/snapshot.go
generated
vendored
@ -80,9 +80,9 @@ func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, p

// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
// given VolumeSnapshot
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured, timeout time.Duration) *unstructured.Unstructured {
defer ginkgo.GinkgoRecover()
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, timeout)
framework.ExpectNoError(err)

vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
74
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
74
vendor/k8s.io/kubernetes/test/e2e/storage/utils/utils.go
generated
vendored
@ -95,45 +95,44 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}

// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()

ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)

ginkgo.By("Testing that written file is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}

// TestKubeletRestartsAndRestoresMap tests that a volume mapped to a pod remains mapped after a kubelet restarts
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
path := "/mnt/volume1"
func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
byteLen := 64
seed := time.Now().UTC().UnixNano()

ginkgo.By("Writing to the volume.")
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

ginkgo.By("Restarting kubelet")
KubeletCommand(KRestart, c, clientPod)

ginkgo.By("Testing that written pv is accessible.")
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}

// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
// checkSubpath is true indicating whether the subpath should be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
@ -152,6 +151,11 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}

ginkgo.By("Writing to the volume.")
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

// This command is to make sure kubelet is started after test finishes no matter it fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
@ -159,6 +163,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)

if secondPod != nil {
ginkgo.By("Starting the second pod")
_, err = c.CoreV1().Pods(clientPod.Namespace).Create(context.TODO(), secondPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "when starting the second pod")
}

ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
@ -180,6 +190,29 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
time.Sleep(30 * time.Second)
}

if secondPod != nil {
ginkgo.By("Waiting for the second pod.")
err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
framework.ExpectNoError(err, "while waiting for the second pod Running")

ginkgo.By("Getting the second pod uuid.")
secondPod, err := c.CoreV1().Pods(secondPod.Namespace).Get(context.TODO(), secondPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "getting the second UID")

ginkgo.By("Expecting the volume mount to be found in the second pod.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "when waiting for the second pod to disappear")
}

ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
@ -195,21 +228,22 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}

}

// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil, volumePath)
}

// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil, volumePath)
}

// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
// forceDelete is true indicating whether the pod is forcefully deleted.
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool) {
func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err, "Failed to get nodeIP.")
nodeIP = nodeIP + ":22"
@ -280,13 +314,13 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
}

// TestVolumeUnmapsFromDeletedPod tests that a volume unmaps if the client pod was deleted while the kubelet was down.
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false)
func TestVolumeUnmapsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, false, devicePath)
}

// TestVolumeUnmapsFromForceDeletedPod tests that a volume unmaps if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true)
func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
TestVolumeUnmapsFromDeletedPodWithForceOption(c, f, clientPod, true, devicePath)
}

// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.

@ -52,8 +52,8 @@ spec:
# Refer to details about the installer in https://cos.googlesource.com/cos/tools/+/refs/heads/master/src/cmd/cos_gpu_installer/
# and the COS release notes (https://cloud.google.com/container-optimized-os/docs/release-notes) to determine version COS GPU installer for a given version of COS.

# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.3 - suitable for COS M85 as per https://cloud.google.com/container-optimized-os/docs/release-notes#cos-85-13310-1209-3
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.5
# Maps to gcr.io/cos-cloud/cos-gpu-installer:v2.0.27 - suitable for COS M97 as per https://cloud.google.com/container-optimized-os/docs/release-notes
- image: gcr.io/cos-cloud/cos-gpu-installer:v2.0.27
name: nvidia-driver-installer
resources:
requests:
@ -81,6 +81,6 @@ spec:
- name: root-mount
mountPath: /root
containers:
- image: "registry.k8s.io/pause:3.8"
- image: "registry.k8s.io/pause:3.9"
name: pause

@ -39,7 +39,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -73,7 +73,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
@ -102,7 +102,7 @@ spec:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"

@ -184,7 +184,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
---

# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml
generated
vendored
@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: csi-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- "--v=5"
- "--csi-address=/csi/csi.sock"
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md
generated
vendored
@ -1,4 +1,4 @@
The files in this directory are exact copys of "kubernetes-latest" in
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/

Do not edit manually. Run ./update-hostpath.sh to refresh the content.

@ -218,7 +218,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
@ -261,7 +261,7 @@ spec:
name: dev-dir

- name: csi-external-health-monitor-controller
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.4.0
image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"
@ -275,7 +275,7 @@ spec:
mountPath: /csi

- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -303,13 +303,13 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
image: registry.k8s.io/sig-storage/livenessprobe:v2.6.0
image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898

- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v3.4.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -323,7 +323,7 @@ spec:
name: socket-dir

- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.1.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- -v=5
- --csi-address=/csi/csi.sock
@ -338,7 +338,7 @@ spec:
name: socket-dir

- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.4.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
args:
- -v=5
- -csi-address=/csi/csi.sock
@ -352,7 +352,7 @@ spec:
name: socket-dir

- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock

@ -1,4 +1,4 @@
# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v3.3.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0
args:
- --v=5
- --csi-address=$(ADDRESS)

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.3.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v4.2.1
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0
args:
- "--v=5"
- "--csi-address=$(ADDRESS)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -34,7 +34,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
args:
- "--drivername=mock.storage.k8s.io"
- "--nodeid=$(KUBE_NODE_NAME)"

@ -15,7 +15,7 @@ spec:
serviceAccountName: csi-mock
containers:
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
image: registry.k8s.io/sig-storage/csi-provisioner:v3.3.0
args:
- "--csi-address=$(ADDRESS)"
# Topology support is needed for the pod rescheduling test
@ -35,7 +35,7 @@ spec:
- mountPath: /csi
name: socket-dir
- name: driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.3.0
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1
args:
- --v=5
- --csi-address=/csi/csi.sock
@ -53,7 +53,7 @@ spec:
- mountPath: /registration
name: registration-dir
- name: mock
image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
args:
- -v=5
- -nodeid=$(KUBE_NODE_NAME)

@ -46,7 +46,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io

---
# priviledged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
4
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh
generated
vendored
@ -47,14 +47,14 @@ trap "rm -rf csi-driver-host-path" EXIT
# Main YAML files.
mkdir hostpath
cat >hostpath/README.md <<EOF
The files in this directory are exact copys of "kubernetes-latest" in
The files in this directory are exact copies of "kubernetes-latest" in
https://github.com/kubernetes-csi/csi-driver-host-path/tree/$hostpath_version/deploy/

Do not edit manually. Run $script to refresh the content.
EOF
cp -r csi-driver-host-path/deploy/kubernetes-latest/hostpath hostpath/
cat >hostpath/hostpath/e2e-test-rbac.yaml <<EOF
# priviledged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
6
vendor/k8s.io/kubernetes/test/utils/conditions.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/utils/conditions.go
generated
vendored
@ -19,7 +19,7 @@ package utils
import (
"fmt"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

@ -52,6 +52,10 @@ func PodRunningReadyOrSucceeded(p *v1.Pod) (bool, error) {
return PodRunningReady(p)
}

func PodSucceeded(p *v1.Pod) (bool, error) {
return p.Status.Phase == v1.PodSucceeded, nil
}

// FailedContainers inspects all containers in a pod and returns failure
// information for containers that have failed or been restarted.
// A map is returned where the key is the containerID and the value is a
2
vendor/k8s.io/kubernetes/test/utils/create_resources.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/utils/create_resources.go
generated
vendored
@ -65,7 +65,7 @@ func CreatePodWithRetries(c clientset.Interface, namespace string, obj *v1.Pod)
if err == nil || apierrors.IsAlreadyExists(err) {
return true, nil
}
return false, fmt.Errorf("Failed to create object with non-retriable error: %v ", err)
return false, fmt.Errorf("failed to create object with non-retriable error: %v ", err)
}
return RetryWithExponentialBackOff(createFunc)
}
4
vendor/k8s.io/kubernetes/test/utils/delete_resources.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/utils/delete_resources.go
generated
vendored
@ -32,7 +32,7 @@ import (
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
)

func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
func DeleteResource(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
switch kind {
case api.Kind("Pod"):
return c.CoreV1().Pods(namespace).Delete(context.TODO(), name, options)
@ -59,7 +59,7 @@ func deleteResource(c clientset.Interface, kind schema.GroupKind, namespace, nam

func DeleteResourceWithRetries(c clientset.Interface, kind schema.GroupKind, namespace, name string, options metav1.DeleteOptions) error {
deleteFunc := func() (bool, error) {
err := deleteResource(c, kind, namespace, name, options)
err := DeleteResource(c, kind, namespace, name, options)
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
Some files were not shown because too many files have changed in this diff