Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

rebase: update kubernetes to 1.28.0 in main

updating kubernetes to 1.28.0 in the main repo.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>

Committed by: mergify[bot]
Parent: b2fdc269c3
Commit: ff3e84ad67
vendor/k8s.io/kubernetes/test/e2e/framework/.import-restrictions (generated, vendored, 9 changes)

@@ -1,13 +1,6 @@
 rules:
   # The core E2E framework is meant to be a normal Kubernetes client,
-  # which means that it shouldn't depend on internal
-  # code. But we are not there yet, so some exceptions
-  # have to be allowed. Over time the list of allowed
-  # packages should get shorter, not longer.
-  - selectorRegexp: ^k8s[.]io/kubernetes/pkg/
-    allowedPrefixes:
-      - k8s.io/kubernetes/pkg/kubelet/apis/
+  # which means that it shouldn't depend on internal code.
+  # The following packages are okay to use:
+  #
+  # public API
vendor/k8s.io/kubernetes/test/e2e/framework/config/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/debug/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/expect.go (generated, vendored, 6 changes)

@@ -81,7 +81,7 @@ var _ types.GomegaMatcher = &matcher[string]{}
 // assertions. The difference is that failed assertions are returned as an
 // error:
 //
-//	if err := Gomega().Expect(pod.Status.Phase).To(gomega.BeEqual(v1.Running)); err != nil {
+//	if err := Gomega().Expect(pod.Status.Phase).To(gomega.Equal(v1.Running)); err != nil {
 //		return fmt.Errorf("test pod not running: %w", err)
 //	}
 //
@@ -294,14 +294,14 @@ var ErrFailure error = FailureError{}
 
 // ExpectEqual expects the specified two are the same, otherwise an exception raises
 //
-// Deprecated: use gomega.Expect().To(gomega.BeEqual())
+// Deprecated: use gomega.Expect().To(gomega.Equal())
 func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
 	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
 }
 
 // ExpectNotEqual expects the specified two are not the same, otherwise an exception raises
 //
-// Deprecated: use gomega.Expect().ToNot(gomega.BeEqual())
+// Deprecated: use gomega.Expect().ToNot(gomega.Equal())
 func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
 	gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
 }
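Aside on the hunk above: gomega has never shipped a BeEqual matcher, which is why both the doc comment and the deprecation notices now point at gomega.Equal. A minimal sketch of the error-returning assertion style (the helper name checkRunning is illustrative, not from this commit):

// Sketch only; framework.Gomega() is the error-returning entry point
// documented in expect.go.
package example

import (
	"fmt"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

func checkRunning(pod *v1.Pod) error {
	// A failed assertion comes back as an error instead of aborting the test.
	if err := framework.Gomega().Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning)); err != nil {
		return fmt.Errorf("test pod not running: %w", err)
	}
	return nil
}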
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go (generated, vendored, 26 changes)

@@ -89,6 +89,12 @@ var (
 
 // Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
 // Eventual goal is to merge this with integration test framework.
+//
+// You can configure the pod security level for your test by setting the `NamespacePodSecurityLevel`
+// which will set all three of pod security admission enforce, warn and audit labels on the namespace.
+// The default pod security profile is "restricted".
+// Each of the labels can be overridden by using more specific NamespacePodSecurity* attributes of this
+// struct.
 type Framework struct {
 	BaseName string
 
@@ -111,6 +117,9 @@ type Framework struct {
 	namespacesToDelete               []*v1.Namespace // Some tests have more than one.
 	NamespaceDeletionTimeout         time.Duration
 	NamespacePodSecurityEnforceLevel admissionapi.Level // The pod security enforcement level for namespaces to be applied.
+	NamespacePodSecurityWarnLevel    admissionapi.Level // The pod security warn (client logging) level for namespaces to be applied.
+	NamespacePodSecurityAuditLevel   admissionapi.Level // The pod security audit (server logging) level for namespaces to be applied.
+	NamespacePodSecurityLevel        admissionapi.Level // The pod security level to be used for all of enforcement, warn and audit. Can be rewritten by more specific configuration attributes.
 
 	// Flaky operation failures in an e2e test can be captured through this.
 	flakeReport *FlakeReport
@@ -448,11 +457,9 @@ func (f *Framework) CreateNamespace
 		labels = labelsCopy
 	}
 
-	enforceLevel := admissionapi.LevelRestricted
-	if f.NamespacePodSecurityEnforceLevel != "" {
-		enforceLevel = f.NamespacePodSecurityEnforceLevel
-	}
-	labels[admissionapi.EnforceLevelLabel] = string(enforceLevel)
+	labels[admissionapi.EnforceLevelLabel] = firstNonEmptyPSaLevelOrRestricted(f.NamespacePodSecurityEnforceLevel, f.NamespacePodSecurityLevel)
+	labels[admissionapi.WarnLevelLabel] = firstNonEmptyPSaLevelOrRestricted(f.NamespacePodSecurityWarnLevel, f.NamespacePodSecurityLevel)
+	labels[admissionapi.AuditLevelLabel] = firstNonEmptyPSaLevelOrRestricted(f.NamespacePodSecurityAuditLevel, f.NamespacePodSecurityLevel)
 
 	ns, err := createTestingNS(ctx, baseName, f.ClientSet, labels)
 	// check ns instead of err to see if it's nil as we may
@@ -481,6 +488,15 @@ func (f *Framework) CreateNamespace
 	return ns, err
 }
 
+func firstNonEmptyPSaLevelOrRestricted(levelConfig ...admissionapi.Level) string {
+	for _, l := range levelConfig {
+		if len(l) > 0 {
+			return string(l)
+		}
+	}
+	return string(admissionapi.LevelRestricted)
+}
+
 // createSecretFromDockerConfig creates a secret using the private image registry credentials.
 // The credentials are provided by --e2e-docker-config-file flag.
 func (f *Framework) createSecretFromDockerConfig(ctx context.Context, namespace string) (*v1.Secret, error) {
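For context, a hedged sketch of how a consuming suite could opt into the new namespace-wide setting (NewDefaultFramework is the package's usual constructor; the base name is illustrative):

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	admissionapi "k8s.io/pod-security-admission/api"
)

func newPrivilegedFramework() *framework.Framework {
	f := framework.NewDefaultFramework("csi-test")
	// One field now drives all three pod security admission labels
	// (enforce, warn, audit); the per-label NamespacePodSecurity*
	// fields still override it individually.
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	return f
}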
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/kubectl/builder.go (generated, vendored, 2 changes)

@@ -49,7 +49,7 @@ func NewKubectlCommand(namespace string, args ...string) *KubectlBuilder {
 	return b
 }
 
-// WithEnv appends the given environment and returns itself.
+// AppendEnv appends the given environment and returns itself.
 func (b *KubectlBuilder) AppendEnv(env []string) *KubectlBuilder {
 	if b.cmd.Env == nil {
 		b.cmd.Env = os.Environ()
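The corrected doc comment now matches the method it describes: AppendEnv extends the command's environment (seeding it from os.Environ() when unset) rather than replacing it. A small sketch, assuming the e2ekubectl alias used elsewhere in this vendor tree; the proxy variable is illustrative:

package example

import (
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// proxiedKubectl builds a kubectl invocation that inherits the current
// environment plus one extra variable.
func proxiedKubectl(ns string) *e2ekubectl.KubectlBuilder {
	return e2ekubectl.NewKubectlCommand(ns, "version").
		AppendEnv([]string{"HTTPS_PROXY=http://localhost:3128"})
}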
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go (generated, vendored, 4 changes)

@@ -44,7 +44,7 @@ const (
 	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
 	podStartDurationKey = "pod_start_duration_seconds"
 	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
-	PodStartSLIDurationKey = "pod_start_sli_duration_seconds"
+	podStartSLIDurationKey = "pod_start_sli_duration_seconds"
 	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
 	cgroupManagerOperationsKey = "cgroup_manager_duration_seconds"
 	// Taken from k8s.io/kubernetes/pkg/kubelet/metrics
@@ -155,7 +155,7 @@ func GetDefaultKubeletLatencyMetrics(ms KubeletMetrics) KubeletLatencyMetrics {
 		podWorkerDurationKey,
 		podWorkerStartDurationKey,
 		podStartDurationKey,
-		PodStartSLIDurationKey,
+		podStartSLIDurationKey,
 		cgroupManagerOperationsKey,
 		dockerOperationsLatencyKey,
 		podWorkerStartDurationKey,
vendor/k8s.io/kubernetes/test/e2e/framework/node/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/node/resource.go (generated, vendored, 4 changes)

@@ -536,7 +536,7 @@ func GetClusterZones
 }
 
 // GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
-func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
+func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (sets.Set[string], error) {
 	// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
 	nodes, err := GetReadySchedulableNodes(ctx, c)
 	if err != nil {
@@ -544,7 +544,7 @@ func GetSchedulableClusterZones
 	}
 
 	// collect values of zone label from all nodes
-	zones := sets.NewString()
+	zones := sets.New[string]()
 	for _, node := range nodes.Items {
 		if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
 			zones.Insert(zone)
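The two hunks above are the 1.28-era migration from the deprecated sets.String to the generic sets.Set[string]. A self-contained sketch of the replacement API (the zone names are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/util/sets"
)

func zonesDemo() []string {
	zones := sets.New[string]() // was: sets.NewString()
	zones.Insert("us-east-1a", "us-east-1b")
	return sets.List(zones) // sorted slice; was: zones.List()
}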
vendor/k8s.io/kubernetes/test/e2e/framework/pod/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/pod/create.go (generated, vendored, 27 changes)

@@ -26,6 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	imageutils "k8s.io/kubernetes/test/utils/image"
+	admissionapi "k8s.io/pod-security-admission/api"
 )
 
 const (
@@ -40,7 +41,7 @@ type Config struct {
 	PVCs                []*v1.PersistentVolumeClaim
 	PVCsReadOnly        bool
 	InlineVolumeSources []*v1.VolumeSource
-	IsPrivileged        bool
+	SecurityLevel       admissionapi.Level
 	Command             string
 	HostIPC             bool
 	HostPID             bool
@@ -52,8 +53,8 @@
 }
 
 // CreateUnschedulablePod with given claims based on node selector
-func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
-	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
+func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*v1.Pod, error) {
+	pod := MakePod(namespace, nodeSelector, pvclaims, securityLevel, command)
 	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %w", err)
@@ -73,12 +74,12 @@ func CreateUnschedulablePod
 
 // CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
-	return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
+	return CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelPrivileged, "")
 }
 
 // CreatePod with given claims based on node selector
-func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
-	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
+func CreatePod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) (*v1.Pod, error) {
+	pod := MakePod(namespace, nodeSelector, pvclaims, securityLevel, command)
 	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %w", err)
@@ -128,7 +129,7 @@ func CreateSecPodWithNodeSelection
 
 // MakePod returns a pod definition based on the namespace. The pod references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod
-func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
+func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, securityLevel admissionapi.Level, command string) *v1.Pod {
 	if len(command) == 0 {
 		command = "trap exit TERM; while true; do sleep 1; done"
 	}
@@ -147,7 +148,7 @@ func MakePod
 					Name:            "write-pod",
 					Image:           GetDefaultTestImage(),
 					Command:         GenerateScriptCmd(command),
-					SecurityContext: GenerateContainerSecurityContext(isPrivileged),
+					SecurityContext: GenerateContainerSecurityContext(securityLevel),
 				},
 			},
 			RestartPolicy: v1.RestartPolicyOnFailure,
@@ -157,6 +158,10 @@ func MakePod
 	if nodeSelector != nil {
 		podSpec.Spec.NodeSelector = nodeSelector
 	}
+	if securityLevel == admissionapi.LevelRestricted {
+		podSpec = MustMixinRestrictedPodSecurity(podSpec)
+	}
 
 	return podSpec
 }
@@ -196,6 +201,10 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
 	if podConfig.ImageID != imageutils.None {
 		image = podConfig.ImageID
 	}
+	securityLevel := podConfig.SecurityLevel
+	if securityLevel == "" {
+		securityLevel = admissionapi.LevelBaseline
+	}
 	podSpec := &v1.PodSpec{
 		HostIPC: podConfig.HostIPC,
 		HostPID: podConfig.HostPID,
@@ -205,7 +214,7 @@ func MakePodSpec(podConfig *Config) *v1.PodSpec {
 				Name:            "write-pod",
 				Image:           GetTestImage(image),
 				Command:         GenerateScriptCmd(podConfig.Command),
-				SecurityContext: GenerateContainerSecurityContext(podConfig.IsPrivileged),
+				SecurityContext: GenerateContainerSecurityContext(securityLevel),
 			},
 		},
 		RestartPolicy: v1.RestartPolicyOnFailure,
vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/framework/pod/output/output.go
generated
vendored
@ -30,7 +30,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
apiv1pod "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
@ -180,7 +180,7 @@ func MatchContainerOutput(
|
||||
|
||||
if podErr != nil {
|
||||
// Pod failed. Dump all logs from all containers to see what's wrong
|
||||
_ = apiv1pod.VisitContainers(&podStatus.Spec, apiv1pod.AllFeatureEnabledContainers(), func(c *v1.Container, containerType apiv1pod.ContainerType) bool {
|
||||
_ = podutils.VisitContainers(&podStatus.Spec, podutils.AllContainers, func(c *v1.Container, containerType podutils.ContainerType) bool {
|
||||
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, ns, podStatus.Name, c.Name)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get logs from node %q pod %q container %q: %v",
|
||||
|
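The replacement helper comes from k8s.io/kubectl instead of k8s.io/kubernetes/pkg, trimming another internal dependency. A sketch of the visitor it exposes (containerNames is illustrative); returning true keeps the iteration going:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/util/podutils"
)

func containerNames(spec *v1.PodSpec) []string {
	var names []string
	podutils.VisitContainers(spec, podutils.AllContainers,
		func(c *v1.Container, _ podutils.ContainerType) bool {
			names = append(names, c.Name)
			return true // false would stop the walk early
		})
	return names
}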
vendor/k8s.io/kubernetes/test/e2e/framework/pod/pod_client.go (generated, vendored, 61 changes)

@@ -38,8 +38,6 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 
-	"k8s.io/kubernetes/pkg/kubelet/util/format"
-	"k8s.io/kubernetes/pkg/util/slice"
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
@@ -72,6 +70,7 @@ func NewPodClient(f *framework.Framework) *PodClient {
 	return &PodClient{
 		f:            f,
 		PodInterface: f.ClientSet.CoreV1().Pods(f.Namespace.Name),
+		namespace:    f.Namespace.Name,
 	}
 }
 
@@ -82,6 +81,7 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
 	return &PodClient{
 		f:            f,
 		PodInterface: f.ClientSet.CoreV1().Pods(namespace),
+		namespace:    namespace,
 	}
 }
 
@@ -89,6 +89,7 @@ func PodClientNS(f *framework.Framework, namespace string) *PodClient {
 type PodClient struct {
 	f *framework.Framework
 	v1core.PodInterface
+	namespace string
 }
 
 // Create creates a new pod according to the framework specifications (don't wait for it to start).
@@ -101,9 +102,8 @@ func (c *PodClient) Create(ctx context.Context, pod *v1.Pod) *v1.Pod {
 
 // CreateSync creates a new pod according to the framework specifications, and wait for it to start and be running and ready.
 func (c *PodClient) CreateSync(ctx context.Context, pod *v1.Pod) *v1.Pod {
-	namespace := c.f.Namespace.Name
 	p := c.Create(ctx, pod)
-	framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, namespace, framework.PodStartTimeout))
+	framework.ExpectNoError(WaitTimeoutForPodReadyInNamespace(ctx, c.f.ClientSet, p.Name, c.namespace, framework.PodStartTimeout))
 	// Get the newest pod after it becomes running and ready, some status may change after pod created, such as pod ip.
 	p, err := c.Get(ctx, p.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err)
@@ -151,37 +151,45 @@ func (c *PodClient) Update
 
 // AddEphemeralContainerSync adds an EphemeralContainer to a pod and waits for it to be running.
 func (c *PodClient) AddEphemeralContainerSync(ctx context.Context, pod *v1.Pod, ec *v1.EphemeralContainer, timeout time.Duration) error {
-	namespace := c.f.Namespace.Name
-
 	podJS, err := json.Marshal(pod)
-	framework.ExpectNoError(err, "error creating JSON for pod %q", format.Pod(pod))
+	framework.ExpectNoError(err, "error creating JSON for pod %q", FormatPod(pod))
 
 	ecPod := pod.DeepCopy()
 	ecPod.Spec.EphemeralContainers = append(ecPod.Spec.EphemeralContainers, *ec)
 	ecJS, err := json.Marshal(ecPod)
-	framework.ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", format.Pod(pod))
+	framework.ExpectNoError(err, "error creating JSON for pod with ephemeral container %q", FormatPod(pod))
 
 	patch, err := strategicpatch.CreateTwoWayMergePatch(podJS, ecJS, pod)
-	framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", format.Pod(pod))
+	framework.ExpectNoError(err, "error creating patch to add ephemeral container %q", FormatPod(pod))
 
 	// Clients may optimistically attempt to add an ephemeral container to determine whether the EphemeralContainers feature is enabled.
 	if _, err := c.Patch(ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "ephemeralcontainers"); err != nil {
 		return err
 	}
 
-	framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, namespace, pod.Name, ec.Name, timeout))
+	framework.ExpectNoError(WaitForContainerRunning(ctx, c.f.ClientSet, c.namespace, pod.Name, ec.Name, timeout))
 	return nil
 }
 
+// FormatPod returns a string representing a pod in a consistent human readable format,
+// with pod name, namespace and pod UID as part of the string.
+// This code is taken from k/k/pkg/kubelet/util/format/pod.go to remove
+// e2e framework -> k/k/pkg/kubelet dependency.
+func FormatPod(pod *v1.Pod) string {
+	if pod == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("%s_%s(%s)", pod.Name, pod.Namespace, pod.UID)
+}
+
 // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
 // disappear before the timeout, it will fail the test.
 func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.DeleteOptions, timeout time.Duration) {
-	namespace := c.f.Namespace.Name
 	err := c.Delete(ctx, name, options)
 	if err != nil && !apierrors.IsNotFound(err) {
 		framework.Failf("Failed to delete pod %q: %v", name, err)
 	}
-	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, namespace, timeout), "wait for pod %q to disappear", name)
+	framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, name, c.namespace, timeout), "wait for pod %q to disappear", name)
 }
 
 // mungeSpec apply test-suite specific transformations to the pod spec.
@@ -223,8 +231,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 // WaitForSuccess waits for pod to succeed.
 // TODO(random-liu): Move pod wait function into this file
 func (c *PodClient) WaitForSuccess(ctx context.Context, name string, timeout time.Duration) {
-	f := c.f
-	gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
+	gomega.Expect(WaitForPodCondition(ctx, c.f.ClientSet, c.namespace, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
@@ -240,8 +247,7 @@ func (c *PodClient) WaitForSuccess
 
 // WaitForFinish waits for pod to finish running, regardless of success or failure.
 func (c *PodClient) WaitForFinish(ctx context.Context, name string, timeout time.Duration) {
-	f := c.f
-	gomega.Expect(WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
+	gomega.Expect(WaitForPodCondition(ctx, c.f.ClientSet, c.namespace, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
@@ -303,10 +309,29 @@ func (c *PodClient) PodIsReady(ctx context.Context, name string) bool {
 	return podutils.IsPodReady(pod)
 }
 
-// RemovePodFinalizer removes the pod's finalizer
+// RemoveString returns a newly created []string that contains all items from slice
+// that are not equal to s.
+// This code is taken from k/k/pkg/util/slice/slice.go to remove
+// e2e/framework/pod -> k/k/pkg/util/slice dependency.
+func removeString(slice []string, s string) []string {
+	newSlice := make([]string, 0)
+	for _, item := range slice {
+		if item != s {
+			newSlice = append(newSlice, item)
+		}
+	}
+	if len(newSlice) == 0 {
+		// Sanitize for unit tests so we don't need to distinguish empty array
+		// and nil.
+		return nil
+	}
+	return newSlice
+}
+
+// RemoveFinalizer removes the pod's finalizer
 func (c *PodClient) RemoveFinalizer(ctx context.Context, podName string, finalizerName string) {
 	framework.Logf("Removing pod's %q finalizer: %q", podName, finalizerName)
 	c.Update(ctx, podName, func(pod *v1.Pod) {
-		pod.ObjectMeta.Finalizers = slice.RemoveString(pod.ObjectMeta.Finalizers, finalizerName, nil)
+		pod.ObjectMeta.Finalizers = removeString(pod.ObjectMeta.Finalizers, finalizerName)
 	})
 }
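FormatPod is now exported from e2epod itself, so the kubelet's format package import goes away. A sketch of what it produces (the pod metadata is illustrative):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func formatted() string {
	p := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "web-0",
		Namespace: "default",
		UID:       "1234",
	}}
	return e2epod.FormatPod(p) // "web-0_default(1234)"
}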
vendor/k8s.io/kubernetes/test/e2e/framework/pod/utils.go (generated, vendored, 20 changes)

@@ -20,6 +20,7 @@ import (
 	"flag"
 	"fmt"
 
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
@@ -111,12 +112,25 @@ func GeneratePodSecurityContext
 // GenerateContainerSecurityContext generates the corresponding container security context with the given inputs
 // If the Node OS is windows, currently we will ignore the inputs and return nil.
 // TODO: Will modify it after windows has its own security context
-func GenerateContainerSecurityContext(privileged bool) *v1.SecurityContext {
+func GenerateContainerSecurityContext(level psaapi.Level) *v1.SecurityContext {
 	if NodeOSDistroIs("windows") {
 		return nil
 	}
-	return &v1.SecurityContext{
-		Privileged: &privileged,
+
+	switch level {
+	case psaapi.LevelBaseline:
+		return &v1.SecurityContext{
+			Privileged: pointer.Bool(false),
+		}
+	case psaapi.LevelPrivileged:
+		return &v1.SecurityContext{
+			Privileged: pointer.Bool(true),
+		}
+	case psaapi.LevelRestricted:
+		return GetRestrictedContainerSecurityContext()
+	default:
+		ginkgo.Fail(fmt.Sprintf("unknown k8s.io/pod-security-admission/policy.Level %q", level))
+		panic("not reached")
 	}
 }
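A short sketch of the level-to-context mapping the new switch implements; the nil check covers the Windows case, where the function still returns nil:

package example

import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"
)

func isPrivilegedContext() bool {
	sc := e2epod.GenerateContainerSecurityContext(admissionapi.LevelPrivileged)
	return sc != nil && sc.Privileged != nil && *sc.Privileged
}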
vendor/k8s.io/kubernetes/test/e2e/framework/pod/wait.go (generated, vendored, 5 changes)

@@ -37,7 +37,6 @@ import (
 	apitypes "k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubectl/pkg/util/podutils"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 	"k8s.io/kubernetes/test/utils/format"
@@ -332,7 +331,7 @@ func WaitForPods
 // RunningReady checks whether pod p's phase is running and it has a ready
 // condition of status true.
 func RunningReady(p *v1.Pod) bool {
-	return p.Status.Phase == v1.PodRunning && podutil.IsPodReady(p)
+	return p.Status.Phase == v1.PodRunning && podutils.IsPodReady(p)
 }
 
 // WaitForPodsRunning waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` are running.
@@ -542,7 +541,7 @@ func WaitForPodNotFoundInNamespace
 	return nil
 }
 
-// PodsResponding waits for the pods to response.
+// WaitForPodsResponding waits for the pods to response.
 func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, wantName bool, timeout time.Duration, pods *v1.PodList) error {
 	if timeout == 0 {
 		timeout = podRespondingTimeout
vendor/k8s.io/kubernetes/test/e2e/framework/pv/pv.go (generated, vendored, 2 changes)

@@ -78,7 +78,7 @@ type pvcval struct{}
 type PVCMap map[types.NamespacedName]pvcval
 
 // PersistentVolumeConfig is consumed by MakePersistentVolume() to generate a PV object
-// for varying storage options (NFS, ceph, glusterFS, etc.).
+// for varying storage options (NFS, ceph, etc.).
 // (+optional) prebind holds a pre-bound PVC
 // Example pvSource:
 //
vendor/k8s.io/kubernetes/test/e2e/framework/ssh/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go (generated, vendored, 18 changes)

@@ -41,7 +41,6 @@ import (
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog/v2"
 
-	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/test/e2e/framework/internal/junit"
 	"k8s.io/kubernetes/test/utils/image"
 	"k8s.io/kubernetes/test/utils/kubeconfig"
@@ -244,8 +243,6 @@ type NodeTestContextType struct {
 	NodeConformance bool
 	// PrepullImages indicates whether node e2e framework should prepull images.
 	PrepullImages bool
-	// KubeletConfig is the kubelet configuration the test is running against.
-	KubeletConfig kubeletconfig.KubeletConfiguration
 	// ImageDescription is the description of the image on which the test is running.
 	ImageDescription string
 	// RuntimeConfig is a map of API server runtime configuration values.
@@ -350,18 +347,13 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the simplified JUnit XML reports and other tests results should be saved. Default is empty, which doesn't generate these reports. If ginkgo's -junit-report parameter is used, that parameter instead of -report-dir determines the location of a single JUnit report.")
 	flags.BoolVar(&TestContext.ReportCompleteGinkgo, "report-complete-ginkgo", false, "Enables writing a complete test report as Ginkgo JSON to <report dir>/ginkgo/report.json. Ignored if --report-dir is not set.")
 	flags.BoolVar(&TestContext.ReportCompleteJUnit, "report-complete-junit", false, "Enables writing a complete test report as JUnit XML to <report dir>/ginkgo/report.json. Ignored if --report-dir is not set.")
-	flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/containerd/containerd.sock", "The container runtime endpoint of cluster VM instances.")
-	flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
-	flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
-	flags.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
+	flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///run/containerd/containerd.sock", "The container runtime endpoint of cluster VM instances.")
+	flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "containerd", "The name of the container runtime process.")
+	flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/run/containerd/containerd.pid", "The pid file of the container runtime.")
+	flags.StringVar(&TestContext.SystemdServices, "systemd-services", "containerd*", "The comma separated list of systemd services the framework will dump logs for.")
 	flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.")
 	flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
-	// TODO: remove the node-role.kubernetes.io/master taint in 1.25 or later.
-	// The change will likely require an action for some users that do not
-	// use k8s originated tools like kubeadm or kOps for creating clusters
-	// and taint their control plane nodes with "master", expecting the test
-	// suite to work with this legacy non-blocking taint.
-	flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.")
+	flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.")
 
 	flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
 	flags.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/.import-restrictions (generated, vendored, 7 changes)

@@ -1,9 +1,12 @@
 # This E2E framework sub-package is currently allowed to use arbitrary
-# dependencies, therefore we need to override the restrictions from
-# the parent .import-restrictions file.
+# dependencies except of k/k/pkg, therefore we need to override the
+# restrictions from the parent .import-restrictions file.
 #
 # At some point it may become useful to also check this package's
 # dependencies more careful.
 rules:
+  - selectorRegexp: "^k8s[.]io/kubernetes/pkg"
+    allowedPrefixes: []
+
   - selectorRegexp: ""
     allowedPrefixes: [ "" ]
vendor/k8s.io/kubernetes/test/e2e/framework/util.go (generated, vendored, 45 changes)

@@ -36,6 +36,7 @@ import (
 	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -420,20 +421,37 @@ func CheckTestingNSDeletedExcept
 }
 
 // WaitForServiceEndpointsNum waits until the amount of endpoints that implement service to expectNum.
+// Some components use EndpointSlices other Endpoints, we must verify that both objects meet the requirements.
 func WaitForServiceEndpointsNum(ctx context.Context, c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
 	return wait.PollWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
 		Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
-		list, err := c.CoreV1().Endpoints(namespace).List(ctx, metav1.ListOptions{})
+		endpoint, err := c.CoreV1().Endpoints(namespace).Get(ctx, serviceName, metav1.GetOptions{})
 		if err != nil {
-			return false, err
+			Logf("Unexpected error trying to get Endpoints for %s : %v", serviceName, err)
+			return false, nil
 		}
 
-		for _, e := range list.Items {
-			if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
-				return true, nil
-			}
+		if countEndpointsNum(endpoint) != expectNum {
+			Logf("Unexpected number of Endpoints, got %d, expected %d", countEndpointsNum(endpoint), expectNum)
+			return false, nil
 		}
-		return false, nil
+
+		esList, err := c.DiscoveryV1().EndpointSlices(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serviceName)})
+		if err != nil {
+			Logf("Unexpected error trying to get EndpointSlices for %s : %v", serviceName, err)
+			return false, nil
+		}
+
+		if len(esList.Items) == 0 {
+			Logf("Waiting for at least 1 EndpointSlice to exist")
+			return false, nil
+		}
+
+		if countEndpointsSlicesNum(esList) != expectNum {
+			Logf("Unexpected number of Endpoints on Slices, got %d, expected %d", countEndpointsSlicesNum(esList), expectNum)
+			return false, nil
+		}
+		return true, nil
 	})
 }
 
@@ -445,6 +463,19 @@ func countEndpointsNum(e *v1.Endpoints) int {
 	return num
 }
 
+func countEndpointsSlicesNum(epList *discoveryv1.EndpointSliceList) int {
+	// EndpointSlices can contain the same address on multiple Slices
+	addresses := sets.Set[string]{}
+	for _, epSlice := range epList.Items {
+		for _, ep := range epSlice.Endpoints {
+			if len(ep.Addresses) > 0 {
+				addresses.Insert(ep.Addresses[0])
+			}
+		}
+	}
+	return addresses.Len()
+}
+
 // restclientConfig returns a config holds the information needed to build connection to kubernetes clusters.
 func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
 	Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
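The rewritten wait loop finds a Service's slices through the standard kubernetes.io/service-name label, exposed as discoveryv1.LabelServiceName. A sketch of that lookup on its own (slicesFor is illustrative):

package example

import (
	"context"
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func slicesFor(ctx context.Context, c clientset.Interface, ns, svc string) (*discoveryv1.EndpointSliceList, error) {
	// Lists only the EndpointSlices owned by the named Service.
	return c.DiscoveryV1().EndpointSlices(ns).List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc),
	})
}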
vendor/k8s.io/kubernetes/test/e2e/framework/volume/fixtures.go (generated, vendored, 20 changes)

@@ -18,14 +18,14 @@ limitations under the License.
 * This test checks that various VolumeSources are working.
 *
 * There are two ways, how to test the volumes:
-* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
+* 1) With containerized server (NFS, Ceph, iSCSI, ...)
 * The test creates a server pod, exporting simple 'index.html' file.
 * Then it uses appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
-* into web server root and loadind the index.html from it.
+* into web server root and loading the index.html from it.
 *
 * These tests work only when privileged containers are allowed, exporting
-* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
+* various filesystems (ex: NFS) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
@@ -59,6 +59,7 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
 	imageutils "k8s.io/kubernetes/test/utils/image"
+	admissionapi "k8s.io/pod-security-admission/api"
 	uexec "k8s.io/utils/exec"
 
 	"github.com/onsi/ginkgo/v2"
@@ -87,7 +88,7 @@ const (
 	VolumeServerPodStartupTimeout = 3 * time.Minute
 
 	// PodCleanupTimeout is a waiting period for pod to be cleaned up and unmount its volumes so we
-	// don't tear down containers with NFS/Ceph/Gluster server too early.
+	// don't tear down containers with NFS/Ceph server too early.
 	PodCleanupTimeout = 20 * time.Second
 )
@@ -398,8 +399,9 @@ func runVolumeTesterPod
 	   When SELinux is enabled on the host, client-pod can not read the content, with permission denied.
 	   Invoking client-pod as privileged, so that it can access the volume content, even when SELinux is enabled on the host.
 	*/
-	if config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
-		privileged = true
+	securityLevel := admissionapi.LevelBaseline // TODO (#118184): also support LevelRestricted
+	if privileged || config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
+		securityLevel = admissionapi.LevelPrivileged
 	}
 	command = "while true ; do sleep 2; done "
 	seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
@@ -443,9 +445,9 @@
 	// a privileged container, so we don't go privileged for block volumes.
 	// https://github.com/moby/moby/issues/35991
 	if privileged && test.Mode == v1.PersistentVolumeBlock {
-		privileged = false
+		securityLevel = admissionapi.LevelBaseline
 	}
-	clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(privileged)
+	clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
 
 	if test.Mode == v1.PersistentVolumeBlock {
 		clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
@@ -620,7 +622,7 @@ func generateWriteCmd(content, path string) []string {
 	return commands
 }
 
-// generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
+// GenerateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
 func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
 	var commands []string
 	commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
vendor/k8s.io/kubernetes/test/utils/deployment.go (generated, vendored, 11 changes)

@@ -21,11 +21,10 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/davecgh/go-spew/spew"
-
 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/dump"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -37,7 +36,7 @@ type LogfFn func(format string, args ...interface{})
 
 func LogReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, logf LogfFn) {
 	if newRS != nil {
-		logf(spew.Sprintf("New ReplicaSet %q of Deployment %q:\n%+v", newRS.Name, deployment.Name, *newRS))
+		logf("New ReplicaSet %q of Deployment %q:\n%s", newRS.Name, deployment.Name, dump.Pretty(*newRS))
 	} else {
 		logf("New ReplicaSet of Deployment %q is nil.", deployment.Name)
 	}
@@ -45,7 +44,7 @@ func LogReplicaSetsOfDeployment
 		logf("All old ReplicaSets of Deployment %q:", deployment.Name)
 	}
 	for i := range allOldRSs {
-		logf(spew.Sprintf("%+v", *allOldRSs[i]))
+		logf(dump.Pretty(*allOldRSs[i]))
 	}
 }
 
@@ -65,7 +64,7 @@ func LogPodsOfDeployment
 		if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) {
 			availability = "available"
 		}
-		logf(spew.Sprintf("Pod %q is %s:\n%+v", pod.Name, availability, pod))
+		logf("Pod %q is %s:\n%s", pod.Name, availability, dump.Pretty(pod))
 	}
 }
 
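dump.Pretty lives in k8s.io/apimachinery/pkg/util/dump and removes the go-spew dependency; it returns a formatted string rather than printing. A minimal sketch of the swap (describe is illustrative):

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/dump"
)

func describe(rs *appsv1.ReplicaSet) string {
	// Roughly what spew.Sprintf("%+v", *rs) produced before.
	return dump.Pretty(*rs)
}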
vendor/k8s.io/kubernetes/test/utils/image/manifest.go (generated, vendored, 18 changes)

@@ -173,8 +173,6 @@ const (
 	DistrolessIptables
 	// Etcd image
 	Etcd
-	// GlusterDynamicProvisioner image
-	GlusterDynamicProvisioner
 	// Httpd image
 	Httpd
 	// HttpdNew image
@@ -226,8 +224,6 @@ const (
 	VolumeNFSServer
 	// VolumeISCSIServer image
 	VolumeISCSIServer
-	// VolumeGlusterServer image
-	VolumeGlusterServer
 	// VolumeRBDServer image
 	VolumeRBDServer
 	// WindowsServer image
@@ -236,7 +232,7 @@
 
 func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
 	configs := map[ImageID]Config{}
-	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.43"}
+	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"}
 	configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
 	configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
 	configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@@ -244,10 +240,9 @@ func initImageConfigs
 	configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
 	configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
 	configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
-	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
-	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.3"}
-	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.7-0"}
-	configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
+	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
+	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"}
+	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"}
 	configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
 	configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
 	configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
@@ -273,9 +268,8 @@ func initImageConfigs
 	configs[ResourceConsumer] = Config{list.PromoterE2eRegistry, "resource-consumer", "1.13"}
 	configs[SdDummyExporter] = Config{list.GcRegistry, "sd-dummy-exporter", "v0.2.0"}
 	configs[VolumeNFSServer] = Config{list.PromoterE2eRegistry, "volume/nfs", "1.3"}
-	configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.3"}
-	configs[VolumeGlusterServer] = Config{list.PromoterE2eRegistry, "volume/gluster", "1.3"}
-	configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.4"}
+	configs[VolumeISCSIServer] = Config{list.PromoterE2eRegistry, "volume/iscsi", "2.6"}
+	configs[VolumeRBDServer] = Config{list.PromoterE2eRegistry, "volume/rbd", "1.0.6"}
 	configs[WindowsServer] = Config{list.MicrosoftRegistry, "windows", "1809"}
 
 	// This adds more config entries. Those have no pre-defined ImageID number,
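Tests do not reference these tags directly; they resolve them through the package's accessor, so the version bumps above flow through automatically. Sketch:

package example

import (
	imageutils "k8s.io/kubernetes/test/utils/image"
)

func agnhostImage() string {
	// Resolves to the 2.45 tag after this commit.
	return imageutils.GetE2EImage(imageutils.Agnhost)
}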
vendor/k8s.io/kubernetes/test/utils/paths.go (generated, vendored, 12 changes)

@@ -27,6 +27,11 @@ import (
 
 // GetK8sRootDir returns the root directory for kubernetes, if present in the gopath.
 func GetK8sRootDir() (string, error) {
+	dir := os.Getenv("KUBE_ROOT")
+	if len(dir) > 0 {
+		return dir, nil
+	}
+
 	dir, err := RootDir()
 	if err != nil {
 		return "", err
@@ -59,12 +64,17 @@
 }
 
 // GetK8sBuildOutputDir returns the build output directory for k8s
-func GetK8sBuildOutputDir() (string, error) {
+// For dockerized build, targetArch (eg: 'linux/arm64', 'linux/amd64') must be explicitly specified
+// For non dockerized build, targetArch is ignored
+func GetK8sBuildOutputDir(isDockerizedBuild bool, targetArch string) (string, error) {
 	k8sRoot, err := GetK8sRootDir()
 	if err != nil {
 		return "", err
 	}
 	buildOutputDir := filepath.Join(k8sRoot, "_output/local/go/bin")
+	if isDockerizedBuild {
+		buildOutputDir = filepath.Join(k8sRoot, "_output/dockerized/bin/", targetArch)
+	}
 	if _, err := os.Stat(buildOutputDir); err != nil {
 		return "", err
 	}
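Existing callers keep the old behaviour by passing false and an empty arch. A hedged sketch of both call styles (binDirs is illustrative, and each call still fails if the directory does not exist):

package example

import (
	testutils "k8s.io/kubernetes/test/utils"
)

func binDirs() (local, dockerized string, err error) {
	// Before: testutils.GetK8sBuildOutputDir()
	if local, err = testutils.GetK8sBuildOutputDir(false, ""); err != nil {
		return "", "", err
	}
	// Dockerized builds must name the target platform explicitly.
	dockerized, err = testutils.GetK8sBuildOutputDir(true, "linux/amd64")
	return local, dockerized, err
}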
vendor/k8s.io/kubernetes/test/utils/runners.go (generated, vendored, 49 changes)

@@ -183,8 +183,11 @@ type RCConfig struct {
 
 	ServiceAccountTokenProjections int
 
-	//Additional containers to run in the pod
+	// Additional containers to run in the pod
 	AdditionalContainers []v1.Container
+
+	// Security context for created pods
+	SecurityContext *v1.SecurityContext
 }
 
 func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@@ -335,11 +338,12 @@ func (config *DeploymentConfig) create() error {
 					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:      config.Name,
-							Image:     config.Image,
-							Command:   config.Command,
-							Ports:     []v1.ContainerPort{{ContainerPort: 80}},
-							Lifecycle: config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 				},
@@ -421,11 +425,12 @@ func (config *ReplicaSetConfig) create() error {
 					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:      config.Name,
-							Image:     config.Image,
-							Command:   config.Command,
-							Ports:     []v1.ContainerPort{{ContainerPort: 80}},
-							Lifecycle: config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 				},
@@ -499,10 +504,11 @@ func (config *JobConfig) create() error {
 					TerminationGracePeriodSeconds: config.getTerminationGracePeriodSeconds(nil),
 					Containers: []v1.Container{
 						{
-							Name:      config.Name,
-							Image:     config.Image,
-							Command:   config.Command,
-							Lifecycle: config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 					RestartPolicy: v1.RestartPolicyOnFailure,
@@ -612,12 +618,13 @@ func (config *RCConfig) create() error {
 					Affinity: config.Affinity,
 					Containers: []v1.Container{
 						{
-							Name:           config.Name,
-							Image:          config.Image,
-							Command:        config.Command,
-							Ports:          []v1.ContainerPort{{ContainerPort: 80}},
-							ReadinessProbe: config.ReadinessProbe,
-							Lifecycle:      config.Lifecycle,
+							Name:            config.Name,
+							Image:           config.Image,
+							Command:         config.Command,
+							Ports:           []v1.ContainerPort{{ContainerPort: 80}},
+							ReadinessProbe:  config.ReadinessProbe,
+							Lifecycle:       config.Lifecycle,
+							SecurityContext: config.SecurityContext,
 						},
 					},
 					DNSPolicy: *config.DNSPolicy,
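A hedged sketch of populating the new field; Name, Image and Replicas are long-standing RCConfig members, and the image tag is illustrative:

package example

import (
	v1 "k8s.io/api/core/v1"
	testutils "k8s.io/kubernetes/test/utils"
	"k8s.io/utils/pointer"
)

func loadConfig() testutils.RCConfig {
	return testutils.RCConfig{
		Name:     "load-test",
		Image:    "registry.k8s.io/pause:3.9",
		Replicas: 3,
		// New in this commit: applied to every container the runner creates.
		SecurityContext: &v1.SecurityContext{
			Privileged: pointer.Bool(false),
		},
	}
}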