vendor update for CSI 0.3.0
1
vendor/k8s.io/kubernetes/test/BUILD
generated
vendored
@@ -13,6 +13,7 @@ filegroup(
":package-srcs",
"//test/conformance:all-srcs",
"//test/e2e:all-srcs",
"//test/e2e_kubeadm:all-srcs",
"//test/e2e_node:all-srcs",
"//test/fixtures:all-srcs",
"//test/images:all-srcs",
7
vendor/k8s.io/kubernetes/test/OWNERS
generated
vendored
@@ -1,5 +1,7 @@
reviewers:
- bowei
- rramkumar1
- MrHohn
- deads2k
- enisoc
- enj # for test/integration/etcd/etcd_storage_path_test.go
@@ -26,6 +28,9 @@ reviewers:
- vishh
approvers:
- bowei # for test/e2e/{dns*,network}.go
- cblecker
- rramkumar1
- MrHohn
- deads2k
- enisoc
- enj # for test/integration/etcd/etcd_storage_path_test.go
@@ -50,4 +55,4 @@ approvers:
- sttts
- timothysc
- zmerlynn
- vishh
- vishh
2
vendor/k8s.io/kubernetes/test/conformance/conformance_test.sh
generated
vendored
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
216
vendor/k8s.io/kubernetes/test/conformance/testdata/conformance.txt
generated
vendored
@@ -1,4 +1,4 @@
|
||||
test/e2e/apimachinery/custom_resource_definition.go: "creating/deleting custom resource definition objects works "
|
||||
test/e2e/apimachinery/custom_resource_definition.go: "creating/deleting custom resource definition objects works"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should delete pods created by rc when not orphaning"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should orphan pods created by rc if delete options say so"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should delete RS created by deployment when not orphaning"
|
||||
@@ -6,54 +6,62 @@ test/e2e/apimachinery/garbage_collector.go: "should orphan RS created by deploym
|
||||
test/e2e/apimachinery/garbage_collector.go: "should keep the rc around until all its pods are deleted if the deleteOptions says so"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should not be blocked by dependency circle"
|
||||
test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image "
|
||||
test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image "
|
||||
test/e2e/apimachinery/watch.go: "should observe add, update, and delete watch notifications on configmaps"
|
||||
test/e2e/apimachinery/watch.go: "should be able to start watching from a specific resource version"
|
||||
test/e2e/apimachinery/watch.go: "should be able to restart watching from the last resource version observed by the previous watch"
|
||||
test/e2e/apimachinery/watch.go: "should observe an object deletion if it stops meeting the requirements of the selector"
|
||||
test/e2e/apps/daemon_set.go: "should run and stop simple daemon"
|
||||
test/e2e/apps/daemon_set.go: "should run and stop complex daemon"
|
||||
test/e2e/apps/daemon_set.go: "should retry creating failed daemon pods"
|
||||
test/e2e/apps/daemon_set.go: "should update pod when spec was updated and update strategy is RollingUpdate"
|
||||
test/e2e/apps/daemon_set.go: "should rollback without unnecessary restarts"
|
||||
test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image"
|
||||
test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image"
|
||||
test/e2e/apps/statefulset.go: "should perform rolling updates and roll backs of template modifications"
|
||||
test/e2e/apps/statefulset.go: "should perform canary updates and phased rolling updates of template modifications"
|
||||
test/e2e/apps/statefulset.go: "Scaling should happen in predictable order and halt if any stateful pod is unhealthy"
|
||||
test/e2e/apps/statefulset.go: "Burst scaling should run to completion even with unhealthy pods"
|
||||
test/e2e/apps/statefulset.go: "Should recreate evicted statefulset"
|
||||
test/e2e/auth/service_accounts.go: "should mount an API token into pods "
|
||||
test/e2e/auth/service_accounts.go: "should allow opting out of API token automount "
|
||||
test/e2e/common/configmap.go: "should be consumable via environment variable "
|
||||
test/e2e/common/configmap.go: "should be consumable via the environment "
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume "
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with defaultMode set "
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume as non-root "
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings "
|
||||
test/e2e/auth/service_accounts.go: "should mount an API token into pods"
|
||||
test/e2e/auth/service_accounts.go: "should allow opting out of API token automount"
|
||||
test/e2e/common/configmap.go: "should be consumable via environment variable"
|
||||
test/e2e/common/configmap.go: "should be consumable via the environment"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with defaultMode set"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume as non-root"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings and Item mode set"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root "
|
||||
test/e2e/common/configmap_volume.go: "updates should be reflected in volume "
|
||||
test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume "
|
||||
test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod "
|
||||
test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart "
|
||||
test/e2e/common/container_probe.go: "with readiness probe that fails should never be ready and never restart "
|
||||
test/e2e/common/container_probe.go: "should be restarted with a exec \"cat /tmp/health\" liveness probe"
|
||||
test/e2e/common/container_probe.go: "should *not* be restarted with a exec \"cat /tmp/health\" liveness probe"
|
||||
test/e2e/common/container_probe.go: "should be restarted with a /healthz http liveness probe "
|
||||
test/e2e/common/container_probe.go: "should have monotonically increasing restart count [Slow]"
|
||||
test/e2e/common/container_probe.go: "should *not* be restarted with a /healthz http liveness probe "
|
||||
test/e2e/common/container_probe.go: "should be restarted with a docker exec liveness probe with timeout "
|
||||
test/e2e/common/docker_containers.go: "should use the image defaults if command and args are blank "
|
||||
test/e2e/common/docker_containers.go: "should be able to override the image's default arguments (docker cmd) "
|
||||
test/e2e/common/docker_containers.go: "should be able to override the image's default command (docker entrypoint) "
|
||||
test/e2e/common/docker_containers.go: "should be able to override the image's default command and arguments "
|
||||
test/e2e/common/downward_api.go: "should provide pod name, namespace and IP address as env vars "
|
||||
test/e2e/common/downward_api.go: "should provide host IP as an env var "
|
||||
test/e2e/common/downward_api.go: "should provide container's limits.cpu/memory and requests.cpu/memory as env vars "
|
||||
test/e2e/common/downward_api.go: "should provide default limits.cpu/memory from node allocatable "
|
||||
test/e2e/common/downward_api.go: "should provide pod UID as env vars "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide podname only "
|
||||
test/e2e/common/downwardapi_volume.go: "should set DefaultMode on files "
|
||||
test/e2e/common/downwardapi_volume.go: "should set mode on item file "
|
||||
test/e2e/common/downwardapi_volume.go: "should update labels on modification "
|
||||
test/e2e/common/downwardapi_volume.go: "should update annotations on modification "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's cpu limit "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's memory limit "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's cpu request "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's memory request "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set "
|
||||
test/e2e/common/downwardapi_volume.go: "should provide node allocatable (memory) as default memory limit if the limit is not set "
|
||||
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root"
|
||||
test/e2e/common/configmap_volume.go: "updates should be reflected in volume"
|
||||
test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume"
|
||||
test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod"
|
||||
test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart"
|
||||
test/e2e/common/container_probe.go: "with readiness probe that fails should never be ready and never restart"
|
||||
test/e2e/common/container_probe.go: "should be restarted with a exec \\\"cat /tmp/health\\\" liveness probe"
|
||||
test/e2e/common/container_probe.go: "should *not* be restarted with a exec \\\"cat /tmp/health\\\" liveness probe"
|
||||
test/e2e/common/container_probe.go: "should be restarted with a /healthz http liveness probe"
|
||||
test/e2e/common/container_probe.go: "should have monotonically increasing restart count"
|
||||
test/e2e/common/container_probe.go: "should *not* be restarted with a /healthz http liveness probe"
|
||||
test/e2e/common/docker_containers.go: "should use the image defaults if command and args are blank"
|
||||
test/e2e/common/docker_containers.go: "should be able to override the image's default arguments (docker cmd)"
|
||||
test/e2e/common/docker_containers.go: "should be able to override the image's default command (docker entrypoint)"
|
||||
test/e2e/common/docker_containers.go: "should be able to override the image's default command and arguments"
|
||||
test/e2e/common/downward_api.go: "should provide pod name, namespace and IP address as env vars"
|
||||
test/e2e/common/downward_api.go: "should provide host IP as an env var"
|
||||
test/e2e/common/downward_api.go: "should provide container's limits.cpu/memory and requests.cpu/memory as env vars"
|
||||
test/e2e/common/downward_api.go: "should provide default limits.cpu/memory from node allocatable"
|
||||
test/e2e/common/downward_api.go: "should provide pod UID as env vars"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide podname only"
|
||||
test/e2e/common/downwardapi_volume.go: "should set DefaultMode on files"
|
||||
test/e2e/common/downwardapi_volume.go: "should set mode on item file"
|
||||
test/e2e/common/downwardapi_volume.go: "should update labels on modification"
|
||||
test/e2e/common/downwardapi_volume.go: "should update annotations on modification"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's cpu limit"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's memory limit"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's cpu request"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide container's memory request"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set"
|
||||
test/e2e/common/downwardapi_volume.go: "should provide node allocatable (memory) as default memory limit if the limit is not set"
|
||||
test/e2e/common/empty_dir.go: "volume on tmpfs should have the correct mode"
|
||||
test/e2e/common/empty_dir.go: "should support (root,0644,tmpfs)"
|
||||
test/e2e/common/empty_dir.go: "should support (root,0666,tmpfs)"
|
||||
@@ -68,20 +76,20 @@ test/e2e/common/empty_dir.go: "should support (root,0777,default)"
|
||||
test/e2e/common/empty_dir.go: "should support (non-root,0644,default)"
|
||||
test/e2e/common/empty_dir.go: "should support (non-root,0666,default)"
|
||||
test/e2e/common/empty_dir.go: "should support (non-root,0777,default)"
|
||||
test/e2e/common/expansion.go: "should allow composing env vars into new env vars "
|
||||
test/e2e/common/expansion.go: "should allow substituting values in a container's command "
|
||||
test/e2e/common/expansion.go: "should allow substituting values in a container's args "
|
||||
test/e2e/common/expansion.go: "should allow composing env vars into new env vars"
|
||||
test/e2e/common/expansion.go: "should allow substituting values in a container's command"
|
||||
test/e2e/common/expansion.go: "should allow substituting values in a container's args"
|
||||
test/e2e/common/host_path.go: "should give a volume the correct mode"
|
||||
test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file "
|
||||
test/e2e/common/networking.go: "should function for intra-pod communication: http "
|
||||
test/e2e/common/networking.go: "should function for intra-pod communication: udp "
|
||||
test/e2e/common/networking.go: "should function for node-pod communication: http "
|
||||
test/e2e/common/networking.go: "should function for node-pod communication: udp "
|
||||
test/e2e/common/pods.go: "should get a host IP "
|
||||
test/e2e/common/pods.go: "should be submitted and removed "
|
||||
test/e2e/common/pods.go: "should be updated "
|
||||
test/e2e/common/pods.go: "should allow activeDeadlineSeconds to be updated "
|
||||
test/e2e/common/pods.go: "should contain environment variables for services "
|
||||
test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file"
|
||||
test/e2e/common/networking.go: "should function for intra-pod communication: http"
|
||||
test/e2e/common/networking.go: "should function for intra-pod communication: udp"
|
||||
test/e2e/common/networking.go: "should function for node-pod communication: http"
|
||||
test/e2e/common/networking.go: "should function for node-pod communication: udp"
|
||||
test/e2e/common/pods.go: "should get a host IP"
|
||||
test/e2e/common/pods.go: "should be submitted and removed"
|
||||
test/e2e/common/pods.go: "should be updated"
|
||||
test/e2e/common/pods.go: "should allow activeDeadlineSeconds to be updated"
|
||||
test/e2e/common/pods.go: "should contain environment variables for services"
|
||||
test/e2e/common/projected.go: "should be consumable from pods in volume"
|
||||
test/e2e/common/projected.go: "should be consumable from pods in volume with defaultMode set"
|
||||
test/e2e/common/projected.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set"
|
||||
@@ -109,54 +117,54 @@ test/e2e/common/projected.go: "should provide container's cpu request"
|
||||
test/e2e/common/projected.go: "should provide container's memory request"
|
||||
test/e2e/common/projected.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set"
|
||||
test/e2e/common/projected.go: "should provide node allocatable (memory) as default memory limit if the limit is not set"
|
||||
test/e2e/common/projected.go: "should project all components that make up the projection API [Projection]"
|
||||
test/e2e/common/secrets.go: "should be consumable from pods in env vars "
|
||||
test/e2e/common/secrets.go: "should be consumable via the environment "
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume "
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with defaultMode set "
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set "
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings "
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set "
|
||||
test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod "
|
||||
test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume "
|
||||
test/e2e/kubectl/kubectl.go: "should create and stop a replication controller "
|
||||
test/e2e/kubectl/kubectl.go: "should scale a replication controller "
|
||||
test/e2e/kubectl/kubectl.go: "should do a rolling update of a replication controller "
|
||||
test/e2e/kubectl/kubectl.go: "should create and stop a working application "
|
||||
test/e2e/kubectl/kubectl.go: "should check if v1 is in available api versions "
|
||||
test/e2e/kubectl/kubectl.go: "should check if Kubernetes master services is included in cluster-info "
|
||||
test/e2e/kubectl/kubectl.go: "should check if kubectl describe prints relevant information for rc and pods "
|
||||
test/e2e/kubectl/kubectl.go: "should create services for rc "
|
||||
test/e2e/kubectl/kubectl.go: "should update the label on a resource "
|
||||
test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs "
|
||||
test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc "
|
||||
test/e2e/kubectl/kubectl.go: "should check is all data is printed "
|
||||
test/e2e/kubectl/kubectl.go: "should create an rc or deployment from an image "
|
||||
test/e2e/kubectl/kubectl.go: "should create an rc from an image "
|
||||
test/e2e/kubectl/kubectl.go: "should support rolling-update to same image "
|
||||
test/e2e/kubectl/kubectl.go: "should create a deployment from an image "
|
||||
test/e2e/kubectl/kubectl.go: "should create a job from an image when restart is OnFailure "
|
||||
test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never "
|
||||
test/e2e/kubectl/kubectl.go: "should update a single-container pod's image "
|
||||
test/e2e/kubectl/kubectl.go: "should create a job from an image, then delete the job "
|
||||
test/e2e/kubectl/kubectl.go: "should support proxy with --port 0 "
|
||||
test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path "
|
||||
test/e2e/network/dns.go: "should provide DNS for the cluster "
|
||||
test/e2e/network/dns.go: "should provide DNS for services "
|
||||
test/e2e/network/proxy.go: "should proxy logs on node with explicit kubelet port using proxy subresource "
|
||||
test/e2e/network/proxy.go: "should proxy logs on node using proxy subresource "
|
||||
test/e2e/network/proxy.go: "should proxy through a service and a pod "
|
||||
test/e2e/network/service.go: "should provide secure master service "
|
||||
test/e2e/network/service.go: "should serve a basic endpoint from pods "
|
||||
test/e2e/network/service.go: "should serve multiport endpoints from pods "
|
||||
test/e2e/network/service_latency.go: "should not be very high "
|
||||
test/e2e/node/events.go: "should be sent by kubelets and the scheduler about pods scheduling and running "
|
||||
test/e2e/node/pods.go: "should be submitted and removed [Flaky]"
|
||||
test/e2e/node/pods.go: "should be submitted and removed "
|
||||
test/e2e/node/pre_stop.go: "should call prestop when killing a pod "
|
||||
test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run "
|
||||
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching "
|
||||
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching "
|
||||
test/e2e/common/projected.go: "should project all components that make up the projection API"
|
||||
test/e2e/common/secrets.go: "should be consumable from pods in env vars"
|
||||
test/e2e/common/secrets.go: "should be consumable via the environment"
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume"
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with defaultMode set"
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set"
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings"
|
||||
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set"
|
||||
test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod"
|
||||
test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume"
|
||||
test/e2e/kubectl/kubectl.go: "should create and stop a replication controller"
|
||||
test/e2e/kubectl/kubectl.go: "should scale a replication controller"
|
||||
test/e2e/kubectl/kubectl.go: "should do a rolling update of a replication controller"
|
||||
test/e2e/kubectl/kubectl.go: "should create and stop a working application"
|
||||
test/e2e/kubectl/kubectl.go: "should check if v1 is in available api versions"
|
||||
test/e2e/kubectl/kubectl.go: "should check if Kubernetes master services is included in cluster-info"
|
||||
test/e2e/kubectl/kubectl.go: "should check if kubectl describe prints relevant information for rc and pods"
|
||||
test/e2e/kubectl/kubectl.go: "should create services for rc"
|
||||
test/e2e/kubectl/kubectl.go: "should update the label on a resource"
|
||||
test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs"
|
||||
test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc"
|
||||
test/e2e/kubectl/kubectl.go: "should check is all data is printed"
|
||||
test/e2e/kubectl/kubectl.go: "should create an rc or deployment from an image"
|
||||
test/e2e/kubectl/kubectl.go: "should create an rc from an image"
|
||||
test/e2e/kubectl/kubectl.go: "should support rolling-update to same image"
|
||||
test/e2e/kubectl/kubectl.go: "should create a deployment from an image"
|
||||
test/e2e/kubectl/kubectl.go: "should create a job from an image when restart is OnFailure"
|
||||
test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never"
|
||||
test/e2e/kubectl/kubectl.go: "should update a single-container pod's image"
|
||||
test/e2e/kubectl/kubectl.go: "should create a job from an image, then delete the job"
|
||||
test/e2e/kubectl/kubectl.go: "should support proxy with --port 0"
|
||||
test/e2e/kubectl/kubectl.go: "should support --unix-socket=/path"
|
||||
test/e2e/network/dns.go: "should provide DNS for the cluster"
|
||||
test/e2e/network/dns.go: "should provide DNS for services"
|
||||
test/e2e/network/proxy.go: "should proxy logs on node with explicit kubelet port using proxy subresource"
|
||||
test/e2e/network/proxy.go: "should proxy logs on node using proxy subresource"
|
||||
test/e2e/network/proxy.go: "should proxy through a service and a pod"
|
||||
test/e2e/network/service.go: "should provide secure master service"
|
||||
test/e2e/network/service.go: "should serve a basic endpoint from pods"
|
||||
test/e2e/network/service.go: "should serve multiport endpoints from pods"
|
||||
test/e2e/network/service_latency.go: "should not be very high"
|
||||
test/e2e/node/events.go: "should be sent by kubelets and the scheduler about pods scheduling and running"
|
||||
test/e2e/node/pods.go: "should be submitted and removed"
|
||||
test/e2e/node/pods.go: "should be submitted and removed"
|
||||
test/e2e/node/pre_stop.go: "should call prestop when killing a pod"
|
||||
test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run"
|
||||
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching"
|
||||
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching"
|
||||
test/e2e_node/kubelet_test.go: "it should print the output to logs"
|
||||
test/e2e_node/kubelet_test.go: "it should not write to root filesystem"
|
||||
test/e2e_node/lifecycle_hook_test.go: "should execute poststart exec hook properly"
|
||||
|
15
vendor/k8s.io/kubernetes/test/conformance/walk.go
generated
vendored
@@ -176,10 +176,11 @@ func (v *visitor) emit(arg ast.Expr) {
return
}

at.Value = normalizeTestName(at.Value)
if *confDoc {
v.convertToConformanceData(at)
} else {
fmt.Printf("%s: %s\n", v.FileSet.Position(at.Pos()).Filename, at.Value)
fmt.Printf("%s: %q\n", v.FileSet.Position(at.Pos()).Filename, at.Value)
}
default:
v.failf(at, "framework.ConformanceIt() called with non-literal argument")
@@ -197,6 +198,18 @@ func (v *visitor) getDescription(value string) string {
" " + strings.Trim(value, "\"")
}

var (
regexTag = regexp.MustCompile(`(\[[a-zA-Z0-9:-]+\])`)
)

// normalizeTestName removes tags (e.g., [Feature:Foo]), double quotes and trim
// the spaces to normalize the test name.
func normalizeTestName(s string) string {
r := regexTag.ReplaceAllString(s, "")
r = strings.Trim(r, "\"")
return strings.TrimSpace(r)
}

// funcName converts a selectorExpr with two idents into a string,
// x.y -> "x.y"
func funcName(n ast.Expr) string {
22
vendor/k8s.io/kubernetes/test/conformance/walk_test.go
generated
vendored
@@ -93,3 +93,25 @@ func TestConformance(t *testing.T) {
}
}
}

func TestNormalizeTestNames(t *testing.T) {
testCases := []struct {
rawName string
normalizedName string
}{
{
"should have monotonically increasing restart count [Slow]",
"should have monotonically increasing restart count",
},
{
" should check is all data is printed ",
"should check is all data is printed",
},
}
for i, tc := range testCases {
actualName := normalizeTestName(tc.rawName)
if actualName != tc.normalizedName {
t.Errorf("test case[%d]: expected normalized name %q, got %q", i, tc.normalizedName, actualName)
}
}
}
2
vendor/k8s.io/kubernetes/test/e2e/BUILD
generated
vendored
@@ -42,7 +42,6 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e",
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
@@ -70,6 +69,7 @@ go_library(
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
],
)
7
vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD
generated
vendored
@@ -11,6 +11,7 @@ go_library(
"aggregator.go",
"certs.go",
"chunking.go",
"crd_watch.go",
"custom_resource_definition.go",
"etcd_failure.go",
"framework.go",
@@ -19,12 +20,12 @@ go_library(
"initializers.go",
"namespace.go",
"table_conversion.go",
"watch.go",
"webhook.go",
],
importpath = "k8s.io/kubernetes/test/e2e/apimachinery",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/util/version:go_default_library",
@@ -38,6 +39,7 @@ go_library(
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
@@ -47,7 +49,9 @@ go_library(
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
@@ -55,6 +59,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
84
vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go
generated
vendored
@@ -24,8 +24,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -87,7 +87,7 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse
|
||||
// delete the APIService first to avoid causing discovery errors
|
||||
_ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil)
|
||||
|
||||
_ = client.ExtensionsV1beta1().Deployments(namespace).Delete("sample-apiserver", nil)
|
||||
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil)
|
||||
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
|
||||
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
|
||||
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
|
||||
@@ -133,7 +133,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
|
||||
// kubectl create -f deploy.yaml
|
||||
deploymentName := "sample-apiserver-deployment"
|
||||
etcdImage := "quay.io/coreos/etcd:v3.2.14"
|
||||
etcdImage := "quay.io/coreos/etcd:v3.2.18"
|
||||
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
@@ -171,14 +171,18 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
Image: etcdImage,
|
||||
},
|
||||
}
|
||||
d := &extensions.Deployment{
|
||||
d := &apps.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Name: deploymentName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Type: extensions.RollingUpdateDeploymentStrategyType,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Strategy: apps.DeploymentStrategy{
|
||||
Type: apps.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -192,7 +196,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
},
|
||||
},
|
||||
}
|
||||
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
|
||||
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
|
||||
@@ -315,7 +319,16 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
})
|
||||
framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.k8s.io", namespace)
|
||||
|
||||
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
var (
|
||||
currentAPIService *apiregistrationv1beta1.APIService
|
||||
currentPods *v1.PodList
|
||||
)
|
||||
|
||||
err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) {
|
||||
|
||||
currentAPIService, _ = aggrclient.ApiregistrationV1beta1().APIServices().Get("v1alpha1.wardle.k8s.io", metav1.GetOptions{})
|
||||
currentPods, _ = client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
|
||||
request := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders")
|
||||
request.SetHeader("Accept", "application/json")
|
||||
_, err := request.DoRaw()
|
||||
@@ -333,7 +346,23 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}, "Waited %s for the sample-apiserver to be ready to handle requests.")
|
||||
if err != nil {
|
||||
currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
|
||||
framework.Logf("current APIService: %s", string(currentAPIServiceJSON))
|
||||
|
||||
currentPodsJSON, _ := json.Marshal(currentPods)
|
||||
framework.Logf("current pods: %s", string(currentPodsJSON))
|
||||
|
||||
if currentPods != nil {
|
||||
for _, pod := range currentPods.Items {
|
||||
for _, container := range pod.Spec.Containers {
|
||||
logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name)
|
||||
framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
framework.ExpectNoError(err, "gave up waiting for apiservice wardle to come up successfully")
|
||||
|
||||
flunderName := generateFlunderName("rest-flunder")
|
||||
@@ -382,19 +411,15 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
flunderName = generateFlunderName("dynamic-flunder")
|
||||
|
||||
// Rerun the Create/List/Delete tests using the Dynamic client.
|
||||
resources, err := client.Discovery().ServerPreferredNamespacedResources()
|
||||
framework.ExpectNoError(err, "getting server preferred namespaces resources for dynamic client")
|
||||
resources, discoveryErr := client.Discovery().ServerPreferredNamespacedResources()
|
||||
groupVersionResources, err := discovery.GroupVersionResources(resources)
|
||||
framework.ExpectNoError(err, "getting group version resources for dynamic client")
|
||||
gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
|
||||
_, ok := groupVersionResources[gvr]
|
||||
if !ok {
|
||||
framework.Failf("could not find group version resource for dynamic client and wardle/flunders.")
|
||||
framework.Failf("could not find group version resource for dynamic client and wardle/flunders (discovery error: %v, discovery results: %#v)", discoveryErr, groupVersionResources)
|
||||
}
|
||||
clientPool := f.ClientPool
|
||||
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
|
||||
framework.ExpectNoError(err, "getting group version resources for dynamic client")
|
||||
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
|
||||
dynamicClient := f.DynamicClient.Resource(gvr).Namespace(namespace)
|
||||
|
||||
// kubectl create -f flunders-1.yaml
|
||||
// Request Body: {"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
|
||||
@@ -411,27 +436,23 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
unstruct := &unstructuredv1.Unstructured{}
|
||||
err = unstruct.UnmarshalJSON(jsonFlunder)
|
||||
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
|
||||
unstruct, err = dynamicClient.Resource(&apiResource, namespace).Create(unstruct)
|
||||
unstruct, err = dynamicClient.Create(unstruct)
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
|
||||
// kubectl get flunders
|
||||
obj, err := dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
|
||||
unstructuredList, err := dynamicClient.List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
unstructuredList, ok := obj.(*unstructuredv1.UnstructuredList)
|
||||
validateErrorWithDebugInfo(f, err, pods, "casting flunders list(%T) as unstructuredList using dynamic client", obj)
|
||||
if len(unstructuredList.Items) != 1 {
|
||||
framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
|
||||
}
|
||||
|
||||
// kubectl delete flunder test-flunder
|
||||
err = dynamicClient.Resource(&apiResource, namespace).Delete(flunderName, &metav1.DeleteOptions{})
|
||||
err = dynamicClient.Delete(flunderName, &metav1.DeleteOptions{})
|
||||
validateErrorWithDebugInfo(f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items)
|
||||
|
||||
// kubectl get flunders
|
||||
obj, err = dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
|
||||
unstructuredList, err = dynamicClient.List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
unstructuredList, ok = obj.(*unstructuredv1.UnstructuredList)
|
||||
validateErrorWithDebugInfo(f, err, pods, "casting flunders list(%T) as unstructuredList using dynamic client", obj)
|
||||
if len(unstructuredList.Items) != 0 {
|
||||
framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
|
||||
}
|
||||
@@ -439,6 +460,17 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
cleanTest(client, aggrclient, namespace)
|
||||
}
|
||||
|
||||
// pollTimed will call Poll but time how long Poll actually took.
|
||||
// It will then framework.logf the msg with the duration of the Poll.
|
||||
// It is assumed that msg will contain one %s for the elapsed time.
|
||||
func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
|
||||
defer func(start time.Time, msg string) {
|
||||
elapsed := time.Since(start)
|
||||
framework.Logf(msg, elapsed)
|
||||
}(time.Now(), msg)
|
||||
return wait.Poll(interval, timeout, condition)
|
||||
}
|
||||
|
||||
func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
|
||||
if err != nil {
|
||||
namespace := f.Namespace.Name
|
||||
|
166
vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_watch.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apiextensions-apiserver/test/integration/testserver"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("crd-watch")
|
||||
|
||||
Context("CustomResourceDefinition Watch", func() {
|
||||
/*
|
||||
Testname: crd-watch
|
||||
Description: Create a Custom Resource Definition and make sure
|
||||
watches observe events on create/delete.
|
||||
*/
|
||||
It("watch on custom resource definition objects", func() {
|
||||
|
||||
framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery())
|
||||
|
||||
const (
|
||||
watchCRNameA = "name1"
|
||||
watchCRNameB = "name2"
|
||||
)
|
||||
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
apiExtensionClient, err := clientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
}
|
||||
|
||||
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
|
||||
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
|
||||
if err != nil {
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
ns := ""
|
||||
noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)
|
||||
|
||||
watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
|
||||
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
|
||||
|
||||
By("Creating first CR ")
|
||||
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchA, watch.Added, testCrA)
|
||||
expectNoEvent(watchB, watch.Added, testCrA)
|
||||
|
||||
By("Creating second CR")
|
||||
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchB, watch.Added, testCrB)
|
||||
expectNoEvent(watchA, watch.Added, testCrB)
|
||||
|
||||
By("Deleting first CR")
|
||||
err = deleteCustomResource(noxuResourceClient, watchCRNameA)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchA, watch.Deleted, nil)
|
||||
expectNoEvent(watchB, watch.Deleted, nil)
|
||||
|
||||
By("Deleting second CR")
|
||||
err = deleteCustomResource(noxuResourceClient, watchCRNameB)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchB, watch.Deleted, nil)
|
||||
expectNoEvent(watchA, watch.Deleted, nil)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (watch.Interface, error) {
|
||||
return crdResourceClient.Watch(
|
||||
metav1.ListOptions{
|
||||
FieldSelector: "metadata.name=" + name,
|
||||
TimeoutSeconds: int64ptr(600),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
|
||||
createdInstance, err := client.Create(instanceToCreate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
createdObjectMeta, err := meta.Accessor(createdInstance)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// it should have a UUID
|
||||
if len(createdObjectMeta.GetUID()) == 0 {
|
||||
return nil, fmt.Errorf("missing uuid: %#v", createdInstance)
|
||||
}
|
||||
createdTypeMeta, err := meta.TypeAccessor(createdInstance)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if e, a := definition.Spec.Group+"/"+definition.Spec.Version, createdTypeMeta.GetAPIVersion(); e != a {
|
||||
return nil, fmt.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a {
|
||||
return nil, fmt.Errorf("expected %v, got %v", e, a)
|
||||
}
|
||||
return createdInstance, nil
|
||||
}
|
||||
|
||||
func deleteCustomResource(client dynamic.ResourceInterface, name string) error {
|
||||
return client.Delete(name, &metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface {
|
||||
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}
|
||||
|
||||
if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped {
|
||||
return client.Resource(gvr).Namespace(ns)
|
||||
} else {
|
||||
return client.Resource(gvr)
|
||||
}
|
||||
|
||||
}
|
2
vendor/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go
generated
vendored
@@ -55,7 +55,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)

//create CRD and waits for the resource to be recognized and available.
_, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.ClientPool)
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
3
vendor/k8s.io/kubernetes/test/e2e/apimachinery/etcd_failure.go
generated
vendored
@@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/apps"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -47,7 +48,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
Client: f.ClientSet,
Name: "baz",
Namespace: f.Namespace.Name,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: 1,
})).NotTo(HaveOccurred())
})
228
vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go
generated
vendored
@@ -18,6 +18,7 @@ package apimachinery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
@@ -32,10 +33,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
@@ -88,8 +89,8 @@ func getOrphanOptions() *metav1.DeleteOptions {
|
||||
}
|
||||
|
||||
var (
|
||||
zero = int64(0)
|
||||
|
||||
zero = int64(0)
|
||||
lablecount = int64(0)
|
||||
CronJobGroupVersionResource = schema.GroupVersionResource{Group: batchv1beta1.GroupName, Version: "v1beta1", Resource: "cronjobs"}
|
||||
)
|
||||
|
||||
@@ -175,7 +176,7 @@ func verifyRemainingDeploymentsReplicaSetsPods(
|
||||
}
|
||||
if len(deployments.Items) != deploymentNum {
|
||||
ret = false
|
||||
By(fmt.Sprintf("expected %d Deploymentss, got %d Deployments", deploymentNum, len(deployments.Items)))
|
||||
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
|
||||
}
|
||||
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@@ -323,6 +324,14 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
|
||||
}
|
||||
}
|
||||
|
||||
// getUniqLabel returns a UniqLabel based on labeLkey and labelvalue.
|
||||
func getUniqLabel(labelkey, labelvalue string) map[string]string {
|
||||
count := atomic.AddInt64(&lablecount, 1)
|
||||
uniqlabelkey := fmt.Sprintf("%s-%05d", labelkey, count)
|
||||
uniqlabelvalue := fmt.Sprintf("%s-%05d", labelvalue, count)
|
||||
return map[string]string{uniqlabelkey: uniqlabelvalue}
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("Garbage collector", func() {
|
||||
f := framework.NewDefaultFramework("gc")
|
||||
|
||||
@@ -337,8 +346,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
|
||||
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
rcName := "simpletest.rc"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "delete_pods"}
|
||||
uniqLabels := getUniqLabel("gctest", "delete_pods")
|
||||
rc := newOwnerRC(f, rcName, 2, uniqLabels)
|
||||
By("create the rc")
|
||||
rc, err := rcClient.Create(rc)
|
||||
@@ -396,8 +404,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
|
||||
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
rcName := "simpletest.rc"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "orphan_pods"}
|
||||
uniqLabels := getUniqLabel("gctest", "orphan_pods")
|
||||
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels)
|
||||
By("create the rc")
|
||||
rc, err := rcClient.Create(rc)
|
||||
@@ -445,17 +452,13 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
framework.Failf("%v", err)
|
||||
}
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
|
||||
return false, fmt.Errorf("expect %d pods, got %d pods", e, a)
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil && err != wait.ErrWaitTimeout {
|
||||
framework.Failf("%v", err)
|
||||
time.Sleep(30 * time.Second)
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list pods: %v", err)
|
||||
}
|
||||
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
|
||||
framework.Failf("expect %d pods, got %d pods", e, a)
|
||||
}
|
||||
gatherMetrics(f)
|
||||
})
|
||||
@@ -465,8 +468,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
|
||||
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
rcName := "simpletest.rc"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "orphan_pods_nil_option"}
|
||||
uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option")
|
||||
rc := newOwnerRC(f, rcName, 2, uniqLabels)
|
||||
By("create the rc")
|
||||
rc, err := rcClient.Create(rc)
|
||||
@@ -494,17 +496,13 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
framework.Failf("failed to delete the rc: %v", err)
|
||||
}
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Failed to list pods: %v", err)
|
||||
}
|
||||
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
|
||||
return false, fmt.Errorf("expect %d pods, got %d pods", e, a)
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil && err != wait.ErrWaitTimeout {
|
||||
framework.Failf("%v", err)
|
||||
time.Sleep(30 * time.Second)
|
||||
pods, err := podClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list pods: %v", err)
|
||||
}
|
||||
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
|
||||
framework.Failf("expect %d pods, got %d pods", e, a)
|
||||
}
|
||||
gatherMetrics(f)
|
||||
})
|
||||
@@ -520,8 +518,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
deployClient := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name)
|
||||
rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name)
|
||||
deploymentName := "simpletest.deployment"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "delete_rs"}
|
||||
uniqLabels := getUniqLabel("gctest", "delete_rs")
|
||||
deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
|
||||
By("create the deployment")
|
||||
createdDeployment, err := deployClient.Create(deployment)
|
||||
@@ -552,14 +549,17 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 0, 0)
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("Failed to wait for all rs to be garbage collected: %v", err)
|
||||
if err != nil {
|
||||
errList := make([]error, 0)
|
||||
errList = append(errList, err)
|
||||
remainingRSs, err := rsClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list RSs post mortem: %v", err)
|
||||
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
|
||||
} else {
|
||||
framework.Failf("remaining rs are: %#v", remainingRSs)
|
||||
errList = append(errList, fmt.Errorf("remaining rs are: %#v", remainingRSs))
|
||||
}
|
||||
aggregatedError := utilerrors.NewAggregate(errList)
|
||||
framework.Failf("Failed to wait for all rs to be garbage collected: %v", aggregatedError)
|
||||
|
||||
}
|
||||
|
||||
@@ -577,8 +577,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
deployClient := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name)
|
||||
rsClient := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name)
|
||||
deploymentName := "simpletest.deployment"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "orphan_rs"}
|
||||
uniqLabels := getUniqLabel("gctest", "orphan_rs")
|
||||
deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
|
||||
By("create the deployment")
|
||||
createdDeployment, err := deployClient.Create(deployment)
|
||||
@ -605,24 +604,28 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
|
||||
framework.Failf("failed to delete the deployment: %v", err)
|
||||
}
|
||||
By("wait for 2 Minute to see if the garbage collector mistakenly deletes the rs")
|
||||
err = wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
|
||||
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
|
||||
})
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
|
||||
time.Sleep(30 * time.Second)
|
||||
ok, err := verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to wait to see if the garbage collecter mistakenly deletes the rs: %v", err)
|
||||
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
|
||||
}
|
||||
if !ok {
|
||||
errList := make([]error, 0)
|
||||
remainingRSs, err := rsClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list RSs post mortem: %v", err)
|
||||
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
|
||||
} else {
|
||||
framework.Failf("remaining rs post mortem: %#v", remainingRSs)
|
||||
errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs))
|
||||
}
|
||||
remainingDSs, err := deployClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("failed to list Deployments post mortem: %v", err)
|
||||
errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err))
|
||||
} else {
|
||||
framework.Failf("remaining deployment's post mortem: %#v", remainingDSs)
|
||||
errList = append(errList, fmt.Errorf("remaining deployment's post mortem: %#v", remainingDSs))
|
||||
}
|
||||
aggregatedError := utilerrors.NewAggregate(errList)
|
||||
framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
|
||||
}
|
||||
rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@ -647,8 +650,7 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
|
||||
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
rcName := "simpletest.rc"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "delete_pods_foreground"}
|
||||
uniqLabels := getUniqLabel("gctest", "delete_pods_foreground")
|
||||
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels)
|
||||
By("create the rc")
|
||||
rc, err := rcClient.Create(rc)
|
||||
@ -738,18 +740,16 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
rc1Name := "simpletest-rc-to-be-deleted"
|
||||
replicas := int32(estimateMaximumPods(clientSet, 10, 100))
|
||||
halfReplicas := int(replicas / 2)
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels := map[string]string{"gctest": "valid_and_pending_owners"}
|
||||
rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabels)
|
||||
uniqLabels_deleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d")
|
||||
rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabels_deleted)
|
||||
By("create the rc1")
|
||||
rc1, err := rcClient.Create(rc1)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create replication controller: %v", err)
|
||||
}
|
||||
rc2Name := "simpletest-rc-to-stay"
|
||||
// TODO: find better way to keep this label unique in the test
|
||||
uniqLabels = map[string]string{"another.key": "another.value"}
|
||||
rc2 := newOwnerRC(f, rc2Name, 0, uniqLabels)
|
||||
uniqLabels_stay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
|
||||
rc2 := newOwnerRC(f, rc2Name, 0, uniqLabels_stay)
|
||||
By("create the rc2")
|
||||
rc2, err = rcClient.Create(rc2)
|
||||
if err != nil {
|
||||
@ -917,16 +917,15 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}()
|
||||
client, err := apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.ClientPool)
|
||||
definition, err = apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
|
||||
// Get a client for the custom resource.
|
||||
resourceClient := client.Resource(&metav1.APIResource{
|
||||
Name: definition.Spec.Names.Plural,
|
||||
Namespaced: false,
|
||||
}, api.NamespaceNone)
|
||||
gvr := schema.GroupVersionResource{Group: definition.Spec.Group, Version: definition.Spec.Version, Resource: definition.Spec.Names.Plural}
|
||||
resourceClient := f.DynamicClient.Resource(gvr)
|
||||
|
||||
apiVersion := definition.Spec.Group + "/" + definition.Spec.Version
|
||||
|
||||
// Create a custom owner resource.
|
||||
@ -999,8 +998,111 @@ var _ = SIGDescribe("Garbage collector", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should support orphan deletion of custom resources", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
apiExtensionClient, err := apiextensionsclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
}
|
||||
|
||||
// Create a random custom resource definition and ensure it's available for
|
||||
// use.
|
||||
definition := apiextensionstestserver.NewRandomNameCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
|
||||
defer func() {
|
||||
err = apiextensionstestserver.DeleteCustomResourceDefinition(definition, apiExtensionClient)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}()
|
||||
definition, err = apiextensionstestserver.CreateNewCustomResourceDefinition(definition, apiExtensionClient, f.DynamicClient)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
|
||||
// Get a client for the custom resource.
|
||||
gvr := schema.GroupVersionResource{Group: definition.Spec.Group, Version: definition.Spec.Version, Resource: definition.Spec.Names.Plural}
|
||||
resourceClient := f.DynamicClient.Resource(gvr)
|
||||
|
||||
apiVersion := definition.Spec.Group + "/" + definition.Spec.Version
|
||||
|
||||
// Create a custom owner resource.
|
||||
ownerName := names.SimpleNameGenerator.GenerateName("owner")
|
||||
owner := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": apiVersion,
|
||||
"kind": definition.Spec.Names.Kind,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": ownerName,
|
||||
},
|
||||
},
|
||||
}
|
||||
persistedOwner, err := resourceClient.Create(owner)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
framework.Logf("created owner resource %q", ownerName)
|
||||
|
||||
// Create a custom dependent resource.
|
||||
dependentName := names.SimpleNameGenerator.GenerateName("dependent")
|
||||
dependent := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"apiVersion": apiVersion,
|
||||
"kind": definition.Spec.Names.Kind,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": dependentName,
|
||||
"ownerReferences": []map[string]string{
|
||||
{
|
||||
"uid": string(persistedOwner.GetUID()),
|
||||
"apiVersion": apiVersion,
|
||||
"kind": definition.Spec.Names.Kind,
|
||||
"name": ownerName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = resourceClient.Create(dependent)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
|
||||
}
|
||||
framework.Logf("created dependent resource %q", dependentName)
|
||||
|
||||
// Delete the owner and orphan the dependent.
|
||||
err = resourceClient.Delete(ownerName, getOrphanOptions())
|
||||
if err != nil {
|
||||
framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
|
||||
}
|
||||
|
||||
By("wait for the owner to be deleted")
|
||||
if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
|
||||
_, err = resourceClient.Get(ownerName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
return false, fmt.Errorf("Failed to get owner: %v", err)
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
framework.Failf("timeout in waiting for the owner to be deleted: %v", err)
|
||||
}
|
||||
|
||||
// Wait 30s and ensure the dependent is not deleted.
|
||||
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the dependent crd")
|
||||
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
|
||||
_, err := resourceClient.Get(dependentName, metav1.GetOptions{})
|
||||
return false, err
|
||||
}); err != nil && err != wait.ErrWaitTimeout {
|
||||
framework.Failf("failed to ensure the dependent is not deleted: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
It("should delete jobs and pods created by cronjob", func() {
|
||||
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
|
||||
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResource, f.Namespace.Name)
|
||||
|
||||
By("Create the cronjob")
|
||||
cronJob := newCronJob("simple", "*/1 * * * ?")
|
||||
|
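The garbage collector tests above always delete owners with DeleteOptions that carry an explicit propagation policy (helpers such as getOrphanOptions are defined elsewhere in this file and are not part of these hunks). A minimal sketch, assuming only the metav1 API already imported throughout the tests; the helper names orphanOptions and foregroundOptions are illustrative, not the file's actual helpers:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// orphanOptions asks the API server to delete the owner but leave its
// dependents in place, so the garbage collector orphans them.
func orphanOptions() *metav1.DeleteOptions {
	policy := metav1.DeletePropagationOrphan
	return &metav1.DeleteOptions{PropagationPolicy: &policy}
}

// foregroundOptions blocks the owner's deletion until the garbage collector
// has removed the dependents that block owner deletion.
func foregroundOptions() *metav1.DeleteOptions {
	policy := metav1.DeletePropagationForeground
	return &metav1.DeleteOptions{PropagationPolicy: &policy}
}

func main() {
	fmt.Println(*orphanOptions().PropagationPolicy, *foregroundOptions().PropagationPolicy)
}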
2
vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go
generated
vendored
@ -264,7 +264,7 @@ var _ = SIGDescribe("Generated clientset", func() {
|
||||
f := framework.NewDefaultFramework("clientset")
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
|
||||
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResource, f.Namespace.Name)
|
||||
})
|
||||
|
||||
It("should create v1beta1 cronJobs, delete cronJobs, watch cronJobs", func() {
|
||||
|
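The change above moves framework.SkipIfMissingResource from the legacy f.ClientPool to f.DynamicClient. A rough sketch of such a probe with the client-go dynamic client of this vendored release (no context arguments yet; newer client-go adds a ctx parameter). The skipIfMissing helper, kubeconfig loading, and the cronjobs GVR are illustrative assumptions, not the framework's actual code:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

// skipIfMissing lists the resource once; a NotFound error means the
// group/version/resource is not served by this cluster and the caller
// should skip the test.
func skipIfMissing(client dynamic.Interface, gvr schema.GroupVersionResource, ns string) (bool, error) {
	_, err := client.Resource(gvr).Namespace(ns).List(metav1.ListOptions{})
	if errors.IsNotFound(err) {
		return true, nil
	}
	return false, err
}

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
	skip, err := skipIfMissing(client, gvr, "default")
	fmt.Println(skip, err)
}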
5
vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go
generated
vendored
@ -29,6 +29,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -114,7 +115,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -135,7 +136,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
7
vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go
generated
vendored
@ -32,13 +32,20 @@ import (
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var serverPrintVersion = utilversion.MustParseSemantic("v1.10.0")
|
||||
|
||||
var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
f := framework.NewDefaultFramework("tables")
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessServerVersionGTE(serverPrintVersion, f.ClientSet.Discovery())
|
||||
})
|
||||
|
||||
It("should return pod details", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
383
vendor/k8s.io/kubernetes/test/e2e/apimachinery/watch.go
generated
vendored
Normal file
383
vendor/k8s.io/kubernetes/test/e2e/apimachinery/watch.go
generated
vendored
Normal file
@ -0,0 +1,383 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
watchConfigMapLabelKey = "watch-this-configmap"
|
||||
|
||||
multipleWatchersLabelValueA = "multiple-watchers-A"
|
||||
multipleWatchersLabelValueB = "multiple-watchers-B"
|
||||
fromResourceVersionLabelValue = "from-resource-version"
|
||||
watchRestartedLabelValue = "watch-closed-and-restarted"
|
||||
toBeChangedLabelValue = "label-changed-and-restored"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Watchers", func() {
|
||||
f := framework.NewDefaultFramework("watch")
|
||||
|
||||
/*
|
||||
Testname: watch-configmaps-with-multiple-watchers
|
||||
Description: Ensure that multiple watchers are able to receive all add,
|
||||
update, and delete notifications on configmaps that match a label selector and do
|
||||
not receive notifications for configmaps which do not match that label selector.
|
||||
*/
|
||||
framework.ConformanceIt("should observe add, update, and delete watch notifications on configmaps", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
By("creating a watch on configmaps with label A")
|
||||
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("creating a watch on configmaps with label B")
|
||||
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("creating a watch on configmaps with label A or B")
|
||||
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
testConfigMapA := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-configmap-a",
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: multipleWatchersLabelValueA,
|
||||
},
|
||||
},
|
||||
}
|
||||
testConfigMapB := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-configmap-b",
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: multipleWatchersLabelValueB,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("creating a configmap with label A and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchA, watch.Added, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Added, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Added, testConfigMapA)
|
||||
|
||||
By("modifying configmap A and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchA, watch.Modified, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Modified, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Modified, testConfigMapA)
|
||||
|
||||
By("modifying configmap A again and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchA, watch.Modified, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Modified, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Modified, testConfigMapA)
|
||||
|
||||
By("deleting configmap A and ensuring the correct watchers observe the notification")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchA, watch.Deleted, nil)
|
||||
expectEvent(watchAB, watch.Deleted, nil)
|
||||
expectNoEvent(watchB, watch.Deleted, nil)
|
||||
|
||||
By("creating a configmap with label B and ensuring the correct watchers observe the notification")
|
||||
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchB, watch.Added, testConfigMapB)
|
||||
expectEvent(watchAB, watch.Added, testConfigMapB)
|
||||
expectNoEvent(watchA, watch.Added, testConfigMapB)
|
||||
|
||||
By("deleting configmap B and ensuring the correct watchers observe the notification")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
expectEvent(watchB, watch.Deleted, nil)
|
||||
expectEvent(watchAB, watch.Deleted, nil)
|
||||
expectNoEvent(watchA, watch.Deleted, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: watch-configmaps-from-resource-version
|
||||
Description: Ensure that a watch can be opened from a particular resource version
|
||||
in the past and only notifications happening after that resource version are observed.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to start watching from a specific resource version", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
testConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-resource-version",
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: fromResourceVersionLabelValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("creating a new configmap")
|
||||
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("modifying the configmap once")
|
||||
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("modifying the configmap a second time")
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("creating a watch on configmaps from the resource version returned by the first update")
|
||||
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Expecting to observe notifications for all changes to the configmap after the first update")
|
||||
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
|
||||
expectEvent(testWatch, watch.Deleted, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: watch-configmaps-closed-and-restarted
|
||||
Description: Ensure that a watch can be reopened from the last resource version
|
||||
observed by the previous watch, and it will continue delivering notifications from
|
||||
that point in time.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to restart watching from the last resource version observed by the previous watch", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
testConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-watch-closed",
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: watchRestartedLabelValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("creating a watch on configmaps")
|
||||
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("creating a new configmap")
|
||||
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("modifying the configmap once")
|
||||
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("closing the watch once it receives two notifications")
|
||||
expectEvent(testWatchBroken, watch.Added, testConfigMap)
|
||||
lastEvent, ok := waitForEvent(testWatchBroken, watch.Modified, nil, 1*time.Minute)
|
||||
if !ok {
|
||||
framework.Failf("Timed out waiting for second watch notification")
|
||||
}
|
||||
testWatchBroken.Stop()
|
||||
|
||||
By("modifying the configmap a second time, while the watch is closed")
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("creating a new watch on configmaps from the last resource version observed by the first watch")
|
||||
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
|
||||
if !ok {
|
||||
framework.Failf("Expected last notfication to refer to a configmap but got: %v", lastEvent)
|
||||
}
|
||||
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
|
||||
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
|
||||
expectEvent(testWatchRestarted, watch.Deleted, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: watch-configmaps-label-changed
|
||||
Description: Ensure that when a watched object stops meeting the requirements of
|
||||
a watch's selector, the watch observes a delete notification, and does not observe
|
||||
further notifications for that object until it meets the selector's requirements again.
|
||||
*/
|
||||
framework.ConformanceIt("should observe an object deletion if it stops meeting the requirements of the selector", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
testConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-label-changed",
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: toBeChangedLabelValue,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("creating a watch on configmaps with a certain label")
|
||||
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("creating a new configmap")
|
||||
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("modifying the configmap once")
|
||||
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("changing the label value of the configmap")
|
||||
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Expecting to observe a delete notification for the watched object")
|
||||
expectEvent(testWatch, watch.Added, testConfigMap)
|
||||
expectEvent(testWatch, watch.Modified, testConfigMapFirstUpdate)
|
||||
expectEvent(testWatch, watch.Deleted, nil)
|
||||
|
||||
By("modifying the configmap a second time")
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
|
||||
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
|
||||
|
||||
By("changing the label value of the configmap back")
|
||||
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("modifying the configmap a third time")
|
||||
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "3")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Expecting to observe an add notification for the watched object when the label value was restored")
|
||||
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
|
||||
expectEvent(testWatch, watch.Modified, testConfigMapThirdUpdate)
|
||||
expectEvent(testWatch, watch.Deleted, nil)
|
||||
})
|
||||
})
|
||||
|
||||
func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
opts := metav1.ListOptions{
|
||||
ResourceVersion: resourceVersion,
|
||||
LabelSelector: metav1.FormatLabelSelector(&metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: watchConfigMapLabelKey,
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: labels,
|
||||
},
|
||||
},
|
||||
}),
|
||||
}
|
||||
return c.CoreV1().ConfigMaps(ns).Watch(opts)
|
||||
}
|
||||
|
||||
func int64ptr(i int) *int64 {
|
||||
i64 := int64(i)
|
||||
return &i64
|
||||
}
|
||||
|
||||
func setConfigMapData(cm *v1.ConfigMap, key, value string) {
|
||||
if cm.Data == nil {
|
||||
cm.Data = make(map[string]string)
|
||||
}
|
||||
cm.Data[key] = value
|
||||
}
|
||||
|
||||
func expectEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
|
||||
if event, ok := waitForEvent(w, eventType, object, 1*time.Minute); !ok {
|
||||
framework.Failf("Timed out waiting for expected watch notification: %v", event)
|
||||
}
|
||||
}
|
||||
|
||||
func expectNoEvent(w watch.Interface, eventType watch.EventType, object runtime.Object) {
|
||||
if event, ok := waitForEvent(w, eventType, object, 10*time.Second); ok {
|
||||
framework.Failf("Unexpected watch notification observed: %v", event)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject runtime.Object, duration time.Duration) (watch.Event, bool) {
|
||||
stopTimer := time.NewTimer(duration)
|
||||
defer stopTimer.Stop()
|
||||
for {
|
||||
select {
|
||||
case actual, ok := <-w.ResultChan():
|
||||
if ok {
|
||||
framework.Logf("Got : %v %v", actual.Type, actual.Object)
|
||||
} else {
|
||||
framework.Failf("Watch closed unexpectedly")
|
||||
}
|
||||
if expectType == actual.Type && (expectObject == nil || apiequality.Semantic.DeepEqual(expectObject, actual.Object)) {
|
||||
return actual, true
|
||||
}
|
||||
case <-stopTimer.C:
|
||||
expected := watch.Event{
|
||||
Type: expectType,
|
||||
Object: expectObject,
|
||||
}
|
||||
return expected, false
|
||||
}
|
||||
}
|
||||
}
|
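The helpers above (watchConfigMaps, expectEvent, waitForEvent) wrap a plain client-go watch. As a standalone usage sketch with the 1.11-era client-go signatures used in this file (no context arguments; newer releases take a ctx first), a caller can open a label-selected watch and drain events until a timeout. The namespace and kubeconfig handling here are illustrative; the label value reuses the multiple-watchers constant defined above:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Watch configmaps in "default" that carry the test label, starting from
	// resourceVersion "0" (any recent version the server still has).
	w, err := client.CoreV1().ConfigMaps("default").Watch(metav1.ListOptions{
		LabelSelector:   "watch-this-configmap=multiple-watchers-A",
		ResourceVersion: "0",
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	// Drain notifications for up to a minute, then give up.
	timeout := time.After(1 * time.Minute)
	for {
		select {
		case event, ok := <-w.ResultChan():
			if !ok {
				fmt.Println("watch channel closed")
				return
			}
			fmt.Printf("observed %s for %T\n", event.Type, event.Object)
		case <-timeout:
			fmt.Println("no more events within the timeout")
			return
		}
	}
}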
515
vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go
generated
vendored
515
vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go
generated
vendored
@ -23,10 +23,11 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/admissionregistration/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
@ -51,12 +52,16 @@ const (
|
||||
roleBindingName = "webhook-auth-reader"
|
||||
|
||||
// The webhook configuration names should not be reused between test instances.
|
||||
crdWebhookConfigName = "e2e-test-webhook-config-crd"
|
||||
crWebhookConfigName = "e2e-test-webhook-config-cr"
|
||||
webhookConfigName = "e2e-test-webhook-config"
|
||||
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
|
||||
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
|
||||
crdMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-crd"
|
||||
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
|
||||
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
|
||||
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
|
||||
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
|
||||
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
|
||||
crdWebhookConfigName = "e2e-test-webhook-config-crd"
|
||||
|
||||
skipNamespaceLabelKey = "skip-webhook-admission"
|
||||
skipNamespaceLabelValue = "yes"
|
||||
@ -118,9 +123,9 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
return
|
||||
}
|
||||
defer testcrd.CleanUp()
|
||||
webhookCleanup := registerWebhookForCRD(f, context, testcrd)
|
||||
webhookCleanup := registerWebhookForCustomResource(f, context, testcrd)
|
||||
defer webhookCleanup()
|
||||
testCRDWebhook(f, testcrd.Crd, testcrd.DynamicClient)
|
||||
testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClient)
|
||||
})
|
||||
|
||||
It("Should unconditionally reject operations on fail closed webhook", func() {
|
||||
@ -141,15 +146,28 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
testMutatingPodWebhook(f)
|
||||
})
|
||||
|
||||
It("Should mutate crd", func() {
|
||||
It("Should not be able to prevent deleting validating-webhook-configurations or mutating-webhook-configurations", func() {
|
||||
webhookCleanup := registerWebhookForWebhookConfigurations(f, context)
|
||||
defer webhookCleanup()
|
||||
testWebhookForWebhookConfigurations(f)
|
||||
})
|
||||
|
||||
It("Should mutate custom resource", func() {
|
||||
testcrd, err := framework.CreateTestCRD(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer testcrd.CleanUp()
|
||||
webhookCleanup := registerMutatingWebhookForCRD(f, context, testcrd)
|
||||
webhookCleanup := registerMutatingWebhookForCustomResource(f, context, testcrd)
|
||||
defer webhookCleanup()
|
||||
testMutatingCRDWebhook(f, testcrd.Crd, testcrd.DynamicClient)
|
||||
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClient)
|
||||
})
|
||||
|
||||
It("Should deny crd creation", func() {
|
||||
crdWebhookCleanup := registerValidatingWebhookForCRD(f, context)
|
||||
defer crdWebhookCleanup()
|
||||
|
||||
testCRDDenyWebhook(f)
|
||||
})
|
||||
|
||||
// TODO: add more e2e tests for mutating webhooks
|
||||
@ -245,14 +263,18 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
|
||||
Image: image,
|
||||
},
|
||||
}
|
||||
d := &extensions.Deployment{
|
||||
d := &apps.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Name: deploymentName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Type: extensions.RollingUpdateDeploymentStrategyType,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Strategy: apps.DeploymentStrategy{
|
||||
Type: apps.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -266,7 +288,7 @@ func deployWebhookAndService(f *framework.Framework, image string, context *cert
|
||||
},
|
||||
},
|
||||
}
|
||||
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
|
||||
By("Wait for the deployment to be ready")
|
||||
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
|
||||
@ -375,7 +397,7 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
return func() {
|
||||
@ -437,7 +459,7 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certCo
|
||||
})
|
||||
framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
|
||||
}
|
||||
@ -493,7 +515,7 @@ func registerMutatingWebhookForPod(f *framework.Framework, context *certContext)
|
||||
})
|
||||
framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
|
||||
@ -525,7 +547,7 @@ func toBeMutatedPod(f *framework.Framework) *v1.Pod {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "example",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -709,6 +731,154 @@ func testFailClosedWebhook(f *framework.Framework) {
|
||||
}
|
||||
}
|
||||
|
||||
func registerWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
|
||||
var err error
|
||||
client := f.ClientSet
|
||||
By("Registering a webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := webhookForWebhooksConfigName
|
||||
failurePolicy := v1beta1.Fail
|
||||
|
||||
// This webhook will deny all requests to Delete admissionregistration objects
|
||||
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "deny-webhook-configuration-deletions.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Delete},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{"admissionregistration.k8s.io"},
|
||||
APIVersions: []string{"*"},
|
||||
Resources: []string{
|
||||
"validatingwebhookconfigurations",
|
||||
"mutatingwebhookconfigurations",
|
||||
},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/always-deny"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
FailurePolicy: &failurePolicy,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
return func() {
|
||||
err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
|
||||
}
|
||||
}
|
||||
|
||||
// This test assumes that the deletion-rejecting webhook defined in
|
||||
// registerWebhookForWebhookConfigurations is in place.
|
||||
func testWebhookForWebhookConfigurations(f *framework.Framework) {
|
||||
var err error
|
||||
client := f.ClientSet
|
||||
By("Creating a validating-webhook-configuration object")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
failurePolicy := v1beta1.Ignore
|
||||
|
||||
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: removableValidatingHookName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "should-be-removable-validating-webhook.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
// This will not match any real resources so this webhook should never be called.
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"invalid"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
// This path is not recognized by the webhook service,
|
||||
// so the call to this webhook will always fail,
|
||||
// but because the failure policy is ignore, it will
|
||||
// have no effect on admission requests.
|
||||
Path: strPtr(""),
|
||||
},
|
||||
CABundle: nil,
|
||||
},
|
||||
FailurePolicy: &failurePolicy,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableValidatingHookName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
By("Deleting the validating-webhook-configuration, which should be possible to remove")
|
||||
|
||||
err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(removableValidatingHookName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableValidatingHookName, namespace)
|
||||
|
||||
By("Creating a mutating-webhook-configuration object")
|
||||
|
||||
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: removableMutatingHookName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "should-be-removable-mutating-webhook.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
// This will not match any real resources so this webhook should never be called.
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"invalid"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
// This path is not recognized by the webhook service,
|
||||
// so the call to this webhook will always fail,
|
||||
// but because the failure policy is ignore, it will
|
||||
// have no effect on admission requests.
|
||||
Path: strPtr(""),
|
||||
},
|
||||
CABundle: nil,
|
||||
},
|
||||
FailurePolicy: &failurePolicy,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableMutatingHookName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
By("Deleting the mutating-webhook-configuration, which should be possible to remove")
|
||||
|
||||
err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(removableMutatingHookName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableMutatingHookName, namespace)
|
||||
}
|
||||
|
||||
func createNamespace(f *framework.Framework, ns *v1.Namespace) error {
|
||||
return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
_, err := f.ClientSet.CoreV1().Namespaces().Create(ns)
|
||||
@ -734,7 +904,7 @@ func nonCompliantPod(f *framework.Framework) *v1.Pod {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "webhook-disallow",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -753,7 +923,7 @@ func hangingPod(f *framework.Framework) *v1.Pod {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "wait-forever",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -810,24 +980,24 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig
|
||||
|
||||
func cleanWebhookTest(client clientset.Interface, namespaceName string) {
|
||||
_ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil)
|
||||
_ = client.ExtensionsV1beta1().Deployments(namespaceName).Delete(deploymentName, nil)
|
||||
_ = client.AppsV1().Deployments(namespaceName).Delete(deploymentName, nil)
|
||||
_ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil)
|
||||
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete(roleBindingName, nil)
|
||||
}
|
||||
|
||||
func registerWebhookForCRD(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
|
||||
func registerWebhookForCustomResource(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
|
||||
client := f.ClientSet
|
||||
By("Registering the crd webhook via the AdmissionRegistration API")
|
||||
By("Registering the custom resource webhook via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := crdWebhookConfigName
|
||||
configName := crWebhookConfigName
|
||||
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "deny-unwanted-crd-data.k8s.io",
|
||||
Name: "deny-unwanted-custom-resource-data.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
@ -836,6 +1006,161 @@ func registerWebhookForCRD(f *framework.Framework, context *certContext, testcrd
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/custom-resource"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering custom resource webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
return func() {
|
||||
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func registerMutatingWebhookForCustomResource(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
|
||||
client := f.ClientSet
|
||||
By("Registering the mutating webhook for a custom resource via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := crMutatingWebhookConfigName
|
||||
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "mutate-custom-resource-data-stage-1.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-custom-resource"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "mutate-custom-resource-data-stage-2.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-custom-resource"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering custom resource webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
|
||||
}
|
||||
|
||||
func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
|
||||
By("Creating a custom resource that should be denied by the webhook")
|
||||
crInstance := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "cr-instance-1",
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"data": map[string]interface{}{
|
||||
"webhook-e2e-test": "webhook-disallow",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := customResourceClient.Create(crInstance)
|
||||
Expect(err).NotTo(BeNil())
|
||||
expectedErrMsg := "the custom resource contains unwanted data"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
|
||||
By("Creating a custom resource that should be mutated by the webhook")
|
||||
cr := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "cr-instance-1",
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"data": map[string]interface{}{
|
||||
"mutation-start": "yes",
|
||||
},
|
||||
},
|
||||
}
|
||||
mutatedCR, err := customResourceClient.Create(cr)
|
||||
Expect(err).To(BeNil())
|
||||
expectedCRData := map[string]interface{}{
|
||||
"mutation-start": "yes",
|
||||
"mutation-stage-1": "yes",
|
||||
"mutation-stage-2": "yes",
|
||||
}
|
||||
if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
|
||||
framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
|
||||
}
|
||||
}
|
||||
|
||||
func registerValidatingWebhookForCRD(f *framework.Framework, context *certContext) func() {
|
||||
client := f.ClientSet
|
||||
By("Registering the crd webhook via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := crdWebhookConfigName
|
||||
|
||||
// This webhook will deny the creation of CustomResourceDefinitions which have the
|
||||
// label "webhook-e2e-test":"webhook-disallow"
|
||||
// NOTE: Because tests are run in parallel and in an unpredictable order, it is critical
|
||||
// that no other test attempts to create CRD with that label.
|
||||
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "deny-crd-with-unwanted-label.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{"apiextensions.k8s.io"},
|
||||
APIVersions: []string{"*"},
|
||||
Resources: []string{"customresourcedefinitions"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
@ -849,118 +1174,62 @@ func registerWebhookForCRD(f *framework.Framework, context *certContext, testcrd
|
||||
})
|
||||
framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
return func() {
|
||||
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext, testcrd *framework.TestCrd) func() {
|
||||
client := f.ClientSet
|
||||
By("Registering the mutating webhook for crd via the AdmissionRegistration API")
|
||||
func testCRDDenyWebhook(f *framework.Framework) {
|
||||
By("Creating a custom resource definition that should be denied by the webhook")
|
||||
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, "deny")
|
||||
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, "deny")
|
||||
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
|
||||
apiVersion := "v1"
|
||||
testcrd := &framework.TestCrd{
|
||||
Name: name,
|
||||
Kind: kind,
|
||||
ApiGroup: group,
|
||||
ApiVersion: apiVersion,
|
||||
}
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := crdMutatingWebhookConfigName
|
||||
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
// Creating a custom resource definition for use by assorted tests.
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
return
|
||||
}
|
||||
apiExtensionClient, err := crdclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
return
|
||||
}
|
||||
crd := &apiextensionsv1beta1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "mutate-crd-data-stage-1.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-crd"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "mutate-crd-data-stage-2.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-crd"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
time.Sleep(10 * time.Second)

return func() { client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) }
}

func testCRDWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, crdClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be denied by the webhook")
crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
"metadata": map[string]interface{}{
"name": "cr-instance-1",
"namespace": f.Namespace.Name,
},
"data": map[string]interface{}{
Name: testcrd.GetMetaName(),
Labels: map[string]string{
"webhook-e2e-test": "webhook-disallow",
},
},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: testcrd.ApiGroup,
Version: testcrd.ApiVersion,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: testcrd.GetPluralName(),
Singular: testcrd.Name,
Kind: testcrd.Kind,
ListKind: testcrd.GetListName(),
},
Scope: apiextensionsv1beta1.NamespaceScoped,
},
}
_, err := crdClient.Create(crInstance)

// create CRD
_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
Expect(err).NotTo(BeNil())
expectedErrMsg := "the custom resource contains unwanted data"
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
}

func testMutatingCRDWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, crdClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be mutated by the webhook")
cr := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
"metadata": map[string]interface{}{
"name": "cr-instance-1",
"namespace": f.Namespace.Name,
},
"data": map[string]interface{}{
"mutation-start": "yes",
},
},
}
mutatedCR, err := crdClient.Create(cr)
Expect(err).To(BeNil())
expectedCRData := map[string]interface{}{
"mutation-start": "yes",
"mutation-stage-1": "yes",
"mutation-stage-2": "yes",
}
if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
}
}

6
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD
generated
vendored
@ -25,20 +25,18 @@ go_library(
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",

9
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go
generated
vendored
9
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go
generated
vendored
@ -33,7 +33,6 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"
)

@ -51,7 +50,7 @@ var _ = SIGDescribe("CronJob", func() {
successCommand := []string{"/bin/true"}

BeforeEach(func() {
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResourceBeta, f.Namespace.Name)
framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name)
})

// multiple jobs running at once
@ -207,11 +206,7 @@ var _ = SIGDescribe("CronJob", func() {

By("Deleting the job")
job := cronJob.Status.Active[0]
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))

By("Ensuring job was deleted")
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)

9
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go
generated
vendored
9
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go
generated
vendored
@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -202,7 +203,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
InternalClient: f.InternalClientset,
Name: rcName,
Namespace: ns,
Image: framework.GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Replicas: numPods,
CreatedPods: &[]*v1.Pod{},
}
@ -257,7 +258,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number
// and awaits it to be observed and reported back in the RC's status.
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods, true)
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)

// Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really?
@ -288,9 +289,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill()
// This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal.
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, false))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
restarter.waitUp()
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, rcName, numPods+5, true))
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
})

It("Kubelet should not restart containers across restart", func() {

212
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go
generated
vendored
212
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go
generated
vendored
@ -24,6 +24,7 @@ import (
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@ -34,10 +35,8 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/daemon"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
|
||||
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -69,22 +68,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
|
||||
if daemonsets != nil && len(daemonsets.Items) > 0 {
|
||||
for _, ds := range daemonsets.Items {
|
||||
By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name))
|
||||
dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By(fmt.Sprintf("Deleting DaemonSet %q", ds.Name))
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, extensionsinternal.Kind("DaemonSet"), f.Namespace.Name, ds.Name))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
|
||||
}
|
||||
}
|
||||
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), pods))
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
@ -108,7 +104,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should run and stop simple daemon", func() {
|
||||
/*
|
||||
Testname: DaemonSet-Creation
|
||||
Description: A conformant Kubernetes distribution MUST support the creation of DaemonSets. When a DaemonSet
|
||||
Pod is deleted, the DaemonSet controller MUST create a replacement Pod.
|
||||
*/
|
||||
framework.ConformanceIt("should run and stop simple daemon", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
@ -130,7 +131,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
It("should run and stop complex daemon", func() {
|
||||
/*
|
||||
Testname: DaemonSet-NodeSelection
|
||||
Description: A conformant Kubernetes distribution MUST support DaemonSet Pod node selection via label
|
||||
selectors.
|
||||
*/
|
||||
framework.ConformanceIt("should run and stop complex daemon", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node selector", dsName)
|
||||
@ -175,6 +181,8 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
|
||||
// default scheduler.
|
||||
It("should run and stop complex daemon with node affinity", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
@ -223,7 +231,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
})
|
||||
|
||||
It("should retry creating failed daemon pods", func() {
|
||||
/*
|
||||
Testname: DaemonSet-FailedPodCreation
|
||||
Description: A conformant Kubernetes distribution MUST create new DaemonSet Pods when they fail.
|
||||
*/
|
||||
framework.ConformanceIt("should retry creating failed daemon pods", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
@ -245,13 +257,21 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
|
||||
By("Wait for the failed daemon pod to be completely deleted.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, waitFailedDaemonPodDeleted(c, &pod))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for the failed daemon pod to be completely deleted")
|
||||
})
|
||||
|
||||
It("Should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
// This test should not be added to conformance. We will consider deprecating OnDelete when the
|
||||
// extensions/v1beta1 and apps/v1beta1 are removed.
|
||||
It("should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
@ -290,11 +310,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash)
|
||||
})
|
||||
|
||||
It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
|
||||
/*
|
||||
Testname: DaemonSet-RollingUpdate
|
||||
Description: A conformant Kubernetes distribution MUST support DaemonSet RollingUpdates.
|
||||
*/
|
||||
framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
templateGeneration := int64(999)
|
||||
framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
@ -304,10 +327,6 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -321,16 +340,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
templateGeneration++
|
||||
|
||||
By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
@ -345,7 +359,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash)
|
||||
})
|
||||
|
||||
It("Should rollback without unnecessary restarts", func() {
|
||||
/*
|
||||
Testname: DaemonSet-Rollback
|
||||
Description: A conformant Kubernetes distribution MUST support automated, minimally disruptive
|
||||
rollback of updates to a DaemonSet.
|
||||
*/
|
||||
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
|
||||
// Skip clusters with only one node, where we cannot have half-done DaemonSet rollout for this test
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
|
||||
@ -416,29 +435,15 @@ func getDaemonSetImagePatch(containerName, containerImage string) string {
|
||||
return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
|
||||
}
|
||||
|
||||
// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
|
||||
// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
|
||||
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *apps.DaemonSet) {
|
||||
trueVar := true
|
||||
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
|
||||
err := c.AppsV1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
|
||||
}
|
||||
|
||||
func newDaemonSet(dsName, image string, label map[string]string) *apps.DaemonSet {
|
||||
return &apps.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: dsName,
|
||||
},
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: label,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: label,
|
||||
@ -670,111 +675,6 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
// We don't care about inactive pods
|
||||
if !controller.IsPodActive(&pod) {
|
||||
continue
|
||||
}
|
||||
podTemplateGeneration := pod.Labels[apps.DeprecatedTemplateGeneration]
|
||||
if podTemplateGeneration != templateGeneration {
|
||||
return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
_, err := c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
// This pod is orphaned only when controller ref is cleared
|
||||
if controllerRef := metav1.GetControllerOf(&pod); controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetHistoryOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
histories := listDaemonHistories(c, ns, label)
|
||||
for _, history := range histories.Items {
|
||||
// This history is orphaned only when controller ref is cleared
|
||||
if controllerRef := metav1.GetControllerOf(&history); controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
// This pod is adopted only when its controller ref is updated
|
||||
if controllerRef := metav1.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
histories := listDaemonHistories(c, ns, label)
|
||||
for _, history := range histories.Items {
|
||||
// This history is adopted only when its controller ref is updated
|
||||
if controllerRef := metav1.GetControllerOf(&history); controllerRef == nil || controllerRef.UID != dsUID {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func waitDaemonSetAdoption(c clientset.Interface, ds *apps.DaemonSet, podPrefix string, podTemplateGeneration int64) {
|
||||
ns := ds.Namespace
|
||||
label := ds.Spec.Template.Labels
|
||||
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
|
||||
|
||||
framework.Logf("Make sure no daemon pod updated its template generation %d", podTemplateGeneration)
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure no pods are recreated by looking at their names")
|
||||
err = checkDaemonSetPodsName(c, ns, podPrefix, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
if !strings.HasPrefix(pod.Name, prefix) {
|
||||
return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsLabels(podList *v1.PodList, hash string) {
|
||||
for _, pod := range podList.Items {
|
||||
podHash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
@ -830,3 +730,15 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
|
||||
Expect(curHistory).NotTo(BeNil())
|
||||
return curHistory
|
||||
}
|
||||
|
||||
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
163
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
163
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -35,10 +36,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutil "k8s.io/kubernetes/test/utils"
|
||||
@ -50,7 +49,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
nilRs *extensions.ReplicaSet
|
||||
nilRs *apps.ReplicaSet
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Deployment", func() {
|
||||
@ -100,7 +99,7 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
})
|
||||
|
||||
func failureTrap(c clientset.Interface, ns string) {
|
||||
deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
return
|
||||
@ -109,7 +108,7 @@ func failureTrap(c clientset.Interface, ns string) {
|
||||
d := deployments.Items[i]
|
||||
|
||||
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
|
||||
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
|
||||
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
|
||||
return
|
||||
@ -126,7 +125,7 @@ func failureTrap(c clientset.Interface, ns string) {
|
||||
return
|
||||
}
|
||||
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
return
|
||||
@ -158,27 +157,22 @@ func newDeploymentRollback(name string, annotations map[string]string, revision
|
||||
}
|
||||
}
|
||||
|
||||
func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting deployment %s", deploymentName)
|
||||
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
|
||||
err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name))
|
||||
|
||||
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
_, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(errors.IsNotFound(err)).To(BeTrue())
|
||||
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(rss.Items).Should(HaveLen(0))
|
||||
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
@ -201,15 +195,14 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte
|
||||
func testDeleteDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
internalClient := f.InternalClientset
|
||||
|
||||
deploymentName := "test-new-deployment"
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
framework.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
@ -219,12 +212,12 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newRS).NotTo(Equal(nilRs))
|
||||
stopDeployment(c, internalClient, ns, deploymentName)
|
||||
stopDeployment(c, ns, deploymentName)
|
||||
}
|
||||
|
||||
func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
@ -245,7 +238,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
|
||||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
|
||||
@ -254,8 +247,8 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
@ -269,15 +262,11 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
|
||||
// There should be 1 old RS (nginx-controller, which is adopted)
|
||||
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(allOldRSs)).Should(Equal(1))
|
||||
// The old RS should contain pod-template-hash in its selector, label, and template label
|
||||
Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
|
||||
Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
|
||||
Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
|
||||
}
|
||||
|
||||
func testRecreateDeployment(f *framework.Framework) {
|
||||
@ -287,8 +276,8 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
// Create a deployment that brings up redis pods.
|
||||
deploymentName := "test-recreate-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
@ -301,7 +290,7 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
@ -324,7 +313,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
rsName := "test-cleanup-controller"
|
||||
replicas := int32(1)
|
||||
revisionHistoryLimit := utilpointer.Int32Ptr(0)
|
||||
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
@ -371,9 +360,9 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
}
|
||||
}
|
||||
}()
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
_, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
|
||||
@ -395,7 +384,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
|
||||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
@ -410,19 +399,19 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
|
||||
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
|
||||
MaxUnavailable: intOrStrP(0),
|
||||
MaxSurge: intOrStrP(1),
|
||||
}
|
||||
newDeployment.Spec.MinReadySeconds = int32(10)
|
||||
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment)
|
||||
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
@ -433,17 +422,17 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(newRS, int32(1))
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
})
|
||||
@ -464,16 +453,16 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
// Not really the new replica set anymore but we GET by name so that's fine.
|
||||
newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
|
||||
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
|
||||
func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
|
||||
func ensureReplicas(rs *apps.ReplicaSet, replicas int32) {
|
||||
Expect(*rs.Spec.Replicas).Should(Equal(replicas))
|
||||
Expect(rs.Status.Replicas).Should(Equal(replicas))
|
||||
}
|
||||
@ -493,12 +482,12 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
d.Annotations = createAnnotation
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
@ -516,7 +505,7 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
updatedDeploymentImage := RedisImage
|
||||
updatedDeploymentImageName := RedisImageName
|
||||
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
|
||||
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
|
||||
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
update.Annotations = updateAnnotation
|
||||
@ -619,7 +608,7 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func randomScale(d *extensions.Deployment, i int) {
|
||||
func randomScale(d *apps.Deployment, i int) {
|
||||
switch r := rand.Float32(); {
|
||||
case r < 0.3:
|
||||
framework.Logf("%02d: scaling up", i)
|
||||
@ -644,12 +633,12 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
// Create a nginx deployment.
|
||||
deploymentName := "nginx"
|
||||
thirty := int32(30)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.ProgressDeadlineSeconds = &thirty
|
||||
d.Spec.RevisionHistoryLimit = &two
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
iterations := 20
|
||||
@ -662,7 +651,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
case n < 0.2:
|
||||
// trigger a new deployment
|
||||
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
|
||||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
randomScale(update, i)
|
||||
@ -672,16 +661,18 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
rollbackTo := &extensions.RollbackConfig{Revision: 0}
|
||||
update.Spec.RollbackTo = rollbackTo
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
if update.Annotations == nil {
|
||||
update.Annotations = make(map[string]string)
|
||||
}
|
||||
update.Annotations[apps.DeprecatedRollbackTo] = "0"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
case n < 0.6:
|
||||
// just scaling
|
||||
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -690,14 +681,14 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
// toggling the deployment
|
||||
if deployment.Spec.Paused {
|
||||
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
randomScale(update, i)
|
||||
})
|
||||
@ -731,10 +722,10 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
}
|
||||
|
||||
// unpause the deployment if we end up pausing it
|
||||
deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if deployment.Spec.Paused {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
})
|
||||
}
|
||||
@ -746,7 +737,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
|
||||
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
@ -757,8 +748,8 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
framework.Logf("Creating Deployment %q", deploymentName)
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -784,8 +775,8 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
|
||||
deploymentName = "test-adopt-deployment"
|
||||
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -814,13 +805,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
|
||||
// Create a nginx deployment.
|
||||
deploymentName := "nginx-deployment"
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.Strategy.RollingUpdate = new(apps.RollingUpdateDeployment)
|
||||
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
|
||||
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
|
||||
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
@ -834,13 +825,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
|
||||
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Update the deployment with a non-existent image so that the new replica set
|
||||
// will be blocked to simulate a partial rollout.
|
||||
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
@ -863,13 +854,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS)
|
||||
err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Checking state of second rollout's replicaset.
|
||||
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
|
||||
@ -886,9 +877,9 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
|
||||
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
|
||||
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
|
||||
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS)
|
||||
err = framework.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check the deployment's minimum availability.
|
||||
@ -900,15 +891,15 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
// Scale the deployment to 30 replicas.
|
||||
newReplicas = int32(30)
|
||||
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Replicas = &newReplicas
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
|
||||
firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
|
||||
@ -946,18 +937,18 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
|
||||
}
|
||||
}
|
||||
|
||||
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
|
||||
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *apps.ReplicaSetList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
|
||||
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(rsList.Items)).To(BeNumerically(">", 0))
|
||||
return rsList
|
||||
}
|
||||
|
||||
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
|
||||
func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error {
|
||||
trueVar := true
|
||||
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
|
||||
return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
|
||||
return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
|
||||
}
|
||||
|
25
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
25
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
@@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
- "k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@@ -111,11 +110,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())

By("delete a job")
- reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
- Expect(err).NotTo(HaveOccurred())
- timeout := 1 * time.Minute
- err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
- Expect(err).NotTo(HaveOccurred())
+ framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))

By("Ensuring job was deleted")
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
@@ -179,7 +174,8 @@ var _ = SIGDescribe("Job", func() {

It("should exceed backoffLimit", func() {
By("Creating a job")
- job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, 0)
+ backoff := 1
+ job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job exceed backofflimit")
@@ -187,11 +183,18 @@ var _ = SIGDescribe("Job", func() {
err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, framework.JobTimeout, "BackoffLimitExceeded")
Expect(err).NotTo(HaveOccurred())

- By("Checking that only one pod created and status is failed")
+ By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred())
- Expect(pods.Items).To(HaveLen(1))
- pod := pods.Items[0]
- Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
+ // Expect(pods.Items).To(HaveLen(backoff + 1))
+ // due to NumRequeus not being stable enough, especially with failed status
+ // updates we need to allow more than backoff+1
+ // TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed
+ if len(pods.Items) < backoff+1 {
+ framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
+ }
+ for _, pod := range pods.Items {
+ Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
+ }
})
})
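The backoffLimit hunk above leans on the Job API's backoffLimit field: failed pods are retried until roughly backoff+1 of them have failed, after which the Job is marked failed with reason BackoffLimitExceeded. A minimal sketch of such a Job object (hypothetical image and command; this is not the framework.NewTestJob helper itself) could be:

```go
package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// failingJob builds a Job whose pods always fail, with the given backoffLimit,
// so the controller eventually reports BackoffLimitExceeded.
func failingJob(name string, backoff int32) *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: batchv1.JobSpec{
			BackoffLimit: &backoff,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:    "fail",
						Image:   "busybox",
						Command: []string{"sh", "-c", "exit 1"}, // always fail
					}},
				},
			},
		},
	}
}

func main() {
	j := failingJob("backofflimit", 1)
	fmt.Println(j.Name, *j.Spec.BackoffLimit)
}
```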
19
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
@@ -233,9 +233,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
common.NewSVCByName(c, ns, name)
- replicas := int32(framework.TestContext.CloudConfig.NumNodes)
+ numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+ framework.ExpectNoError(err)
+ replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, nil)
- err := framework.VerifyPods(c, ns, name, true, replicas)
+ err = framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")

By("choose a node with at least one pod - we will block some network traffic on this node")
@@ -298,9 +300,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
gracePeriod := int64(30)

common.NewSVCByName(c, ns, name)
- replicas := int32(framework.TestContext.CloudConfig.NumNodes)
+ numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
+ framework.ExpectNoError(err)
+ replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
- err := framework.VerifyPods(c, ns, name, true, replicas)
+ err = framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")

By("choose a node with at least one pod - we will block some network traffic on this node")
@@ -371,10 +375,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

pst := framework.NewStatefulSetTester(c)

- nn := framework.TestContext.CloudConfig.NumNodes
- nodeNames, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn)
+ nn, err := framework.NumberOfRegisteredNodes(f.ClientSet)
framework.ExpectNoError(err)
- common.RestartNodes(f.ClientSet, nodeNames)
+ nodes, err := framework.CheckNodesReady(f.ClientSet, nn, framework.NodeReadyInitialTimeout)
+ framework.ExpectNoError(err)
+ common.RestartNodes(f.ClientSet, nodes)

By("waiting for pods to be running again")
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
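The hunks above swap a static node count (TestContext.CloudConfig.NumNodes) for a live count of registered nodes. One plausible way to take such a count with client-go, shown only to illustrate the idea and not the framework.NumberOfRegisteredNodes implementation, is to list Node objects and use the length of the result (kubeconfig path is hypothetical; signatures match the contextless client-go vendored in this tree):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// registeredNodeCount returns the number of Node objects the API server knows
// about, which is what the test now sizes its ReplicationController against.
func registeredNodeCount(c kubernetes.Interface) (int, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(nodes.Items), nil
}

func main() {
	// Hypothetical kubeconfig path; error handling kept minimal for the sketch.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	n, err := registeredNodeCount(client)
	if err != nil {
		panic(err)
	}
	fmt.Println("registered nodes:", n)
}
```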
6
vendor/k8s.io/kubernetes/test/e2e/apps/rc.go
generated
vendored
@@ -38,6 +38,12 @@ import (
var _ = SIGDescribe("ReplicationController", func() {
f := framework.NewDefaultFramework("replication-controller")

+ /*
+ Release : v1.9
+ Testname: Replication Controller, run basic image
+ Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
+ */
+
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
35
vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go
generated
vendored
@@ -20,8 +20,8 @@ import (
"fmt"
"time"

+ apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
- extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -36,13 +36,17 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
)

- func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
+ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *apps.ReplicaSet {
zero := int64(0)
- return &extensions.ReplicaSet{
+ return &apps.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
- Name: rsName,
+ Name:   rsName,
+ Labels: rsPodLabels,
},
- Spec: extensions.ReplicaSetSpec{
+ Spec: apps.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: rsPodLabels,
},
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
@@ -78,6 +82,11 @@ func newPodQuota(name, number string) *v1.ResourceQuota {
var _ = SIGDescribe("ReplicaSet", func() {
f := framework.NewDefaultFramework("replicaset")

+ /*
+ Release : v1.9
+ Testname: Replica Set, run basic image
+ Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried.
+ */
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
})
@@ -111,7 +120,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
- _, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(newRS)
+ _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
Expect(err).NotTo(HaveOccurred())

// Check that pods for the new RS were created.
@@ -187,14 +196,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {

By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
- rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Create(rs)
+ rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
generation := rs.Generation
conditions := rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
- rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
+ rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -204,7 +213,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
}
conditions = rs.Status.Conditions

- cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
+ cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
return cond != nil, nil

})
@@ -214,7 +223,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
- rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
+ rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
})
@@ -224,7 +233,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
generation = rs.Generation
conditions = rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
- rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
+ rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -234,7 +243,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
}
conditions = rs.Status.Conditions

- cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
+ cond := replicaset.GetCondition(rs.Status, apps.ReplicaSetReplicaFailure)
return cond == nil, nil
})
if err == wait.ErrWaitTimeout {
@@ -267,7 +276,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
- rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(rsSt)
+ rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
Expect(err).NotTo(HaveOccurred())

By("Then the orphan pod is adopted")
38
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
@@ -249,12 +249,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})

/*
- Testname: StatefulSet-RollingUpdate
- Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods
- one at a time when the Pod template changes. The StatefulSet's status MUST indicate the
- CurrentRevision and UpdateRevision. If the template is changed to match a prior revision,
- StatefulSet MUST detect this as a rollback instead of creating a new revision.
- This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+ Release : v1.9
+ Testname: StatefulSet, Rolling Update
+ Description: StatefulSet MUST support the RollingUpdate strategy to automatically replace Pods one at a time when the Pod template changes. The StatefulSet's status MUST indicate the CurrentRevision and UpdateRevision. If the template is changed to match a prior revision, StatefulSet MUST detect this as a rollback instead of creating a new revision. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
@@ -372,11 +369,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})

/*
- Testname: StatefulSet-RollingUpdatePartition
- Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for
- canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress,
- StatefulSet MUST restore the Pod without violating the Partition.
- This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+ Release : v1.9
+ Testname: StatefulSet, Rolling Update with Partition
+ Description: StatefulSet's RollingUpdate strategy MUST support the Partition parameter for canaries and phased rollouts. If a Pod is deleted while a rolling update is in progress, StatefulSet MUST restore the Pod without violating the Partition. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("should perform canary updates and phased rolling updates of template modifications", func() {
By("Creating a new StaefulSet")
@@ -670,11 +665,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})

/*
- Testname: StatefulSet-Scaling
- Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up,
- and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any
- Pods belonging to the StatefulSet are unhealthy.
- This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+ Release : v1.9
+ Testname: StatefulSet, Scaling
+ Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
psLabels := klabels.Set(labels)
@@ -753,9 +746,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})

/*
- Testname: StatefulSet-BurstScaling
- Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling.
- This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+ Release : v1.9
+ Testname: StatefulSet, Burst Scaling
+ Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
psLabels := klabels.Set(labels)
@@ -796,10 +789,9 @@ var _ = SIGDescribe("StatefulSet", func() {
})

/*
- Testname: StatefulSet-RecreateFailedPod
- Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state,
- such as when they are rejected or evicted by a Node.
- This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
+ Release : v1.9
+ Testname: StatefulSet, Recreate Failed Pod
+ Description: StatefulSet MUST delete and recreate Pods it owns that go into a Failed state, such as when they are rejected or evicted by a Node. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Should recreate evicted statefulset", func() {
podName := "test-pod"
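The rewritten "Rolling Update with Partition" description above refers to the RollingUpdate strategy's partition field: pods with an ordinal greater than or equal to the partition move to the new template, while lower ordinals keep the old revision, which is what enables canaries and phased rollouts. A minimal standalone sketch of such a strategy, using only apps/v1 types and not taken from the vendored test, is:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// partitionedStrategy builds the update strategy the description refers to:
// ordinals >= partition are updated to the new template, the rest stay on the
// old revision (canary / phased rollout).
func partitionedStrategy(partition int32) appsv1.StatefulSetUpdateStrategy {
	return appsv1.StatefulSetUpdateStrategy{
		Type: appsv1.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
			Partition: &partition,
		},
	}
}

func main() {
	s := partitionedStrategy(2) // only pods with ordinal >= 2 are updated first
	fmt.Println(s.Type, *s.RollingUpdate.Partition)
}
```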
1
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD
generated
vendored
@@ -29,6 +29,7 @@ go_library(
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
+ "//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
258
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go
generated
vendored
@ -21,21 +21,24 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
apiextensionclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apiextensions-apiserver/test/integration/testserver"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/apis/audit/v1beta1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/evanphx/json-patch"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -62,9 +65,19 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
|
||||
config, err := framework.LoadConfig()
|
||||
framework.ExpectNoError(err, "failed to load config")
|
||||
apiExtensionClient, err := clientset.NewForConfig(config)
|
||||
apiExtensionClient, err := apiextensionclientset.NewForConfig(config)
|
||||
framework.ExpectNoError(err, "failed to initialize apiExtensionClient")
|
||||
|
||||
By("Creating a kubernetes client that impersonates an unauthorized anonymous user")
|
||||
config, err = framework.LoadConfig()
|
||||
framework.ExpectNoError(err)
|
||||
config.Impersonate = restclient.ImpersonationConfig{
|
||||
UserName: "system:anonymous",
|
||||
Groups: []string{"system:unauthenticated"},
|
||||
}
|
||||
anonymousClient, err := clientset.NewForConfig(config)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
testCases := []struct {
|
||||
action func()
|
||||
events []auditEvent
|
||||
@ -79,7 +92,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{{
|
||||
Name: "pause",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
}},
|
||||
},
|
||||
}
|
||||
@ -117,6 +130,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -128,6 +142,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -139,6 +154,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseStarted,
|
||||
@ -150,6 +166,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -161,6 +178,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -172,6 +190,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -183,6 +202,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -194,6 +214,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -201,36 +222,36 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
{
|
||||
func() {
|
||||
podLabels := map[string]string{"name": "audit-deployment-pod"}
|
||||
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), extensions.RecreateDeploymentStrategyType)
|
||||
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), apps.RecreateDeploymentStrategyType)
|
||||
|
||||
_, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Create(d)
|
||||
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "failed to create audit-deployment")
|
||||
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
|
||||
_, err = f.ClientSet.AppsV1().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to get audit-deployment")
|
||||
|
||||
deploymentChan, err := f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Watch(watchOptions)
|
||||
deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(watchOptions)
|
||||
framework.ExpectNoError(err, "failed to create watch for deployments")
|
||||
for range deploymentChan.ResultChan() {
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Update(d)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(namespace).Update(d)
|
||||
framework.ExpectNoError(err, "failed to update audit-deployment")
|
||||
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
|
||||
_, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
|
||||
framework.ExpectNoError(err, "failed to patch deployment")
|
||||
|
||||
_, err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).List(metav1.ListOptions{})
|
||||
_, err = f.ClientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "failed to create list deployments")
|
||||
|
||||
err = f.ClientSet.ExtensionsV1beta1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
|
||||
err = f.ClientSet.AppsV1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "failed to delete deployments")
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace),
|
||||
"create",
|
||||
201,
|
||||
auditTestUser,
|
||||
@ -238,10 +259,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"get",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -249,10 +271,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments", namespace),
|
||||
"list",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -260,10 +283,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseStarted,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -271,10 +295,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -282,10 +307,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"update",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -293,10 +319,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"patch",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -304,10 +331,11 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"delete",
|
||||
200,
|
||||
auditTestUser,
|
||||
@ -315,6 +343,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
"allow",
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -365,6 +394,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -376,6 +406,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -387,6 +418,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseStarted,
|
||||
@ -398,6 +430,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -409,6 +442,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -420,6 +454,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -431,6 +466,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -442,6 +478,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -491,6 +528,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -502,6 +540,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -513,6 +552,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseStarted,
|
||||
@ -524,6 +564,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -535,6 +576,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -546,6 +588,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -557,6 +600,7 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
@ -568,87 +612,137 @@ var _ = SIGDescribe("Advanced Audit", func() {
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
"allow",
|
||||
},
|
||||
},
|
||||
},
|
||||
// Create and delete custom resource definition.
|
||||
{
|
||||
func() {
|
||||
_, err = testserver.CreateNewCustomResourceDefinition(crd, apiExtensionClient, f.ClientPool)
|
||||
crd, err = testserver.CreateNewCustomResourceDefinition(crd, apiExtensionClient, f.DynamicClient)
|
||||
framework.ExpectNoError(err, "failed to create custom resource definition")
|
||||
testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
level: v1beta1.LevelRequestResponse,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: "/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions",
|
||||
verb: "create",
|
||||
code: 201,
|
||||
user: auditTestUser,
|
||||
resource: "customresourcedefinitions",
|
||||
requestObject: true,
|
||||
responseObject: true,
|
||||
level: v1beta1.LevelRequestResponse,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: "/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions",
|
||||
verb: "create",
|
||||
code: 201,
|
||||
user: auditTestUser,
|
||||
resource: "customresourcedefinitions",
|
||||
requestObject: true,
|
||||
responseObject: true,
|
||||
authorizeDecision: "allow",
|
||||
}, {
|
||||
level: v1beta1.LevelMetadata,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/%s/v1beta1/%s", crdNamespace, crdName),
|
||||
verb: "create",
|
||||
code: 201,
|
||||
user: auditTestUser,
|
||||
resource: crdName,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
level: v1beta1.LevelMetadata,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/%s/v1beta1/%s", crdNamespace, crdName),
|
||||
verb: "create",
|
||||
code: 201,
|
||||
user: auditTestUser,
|
||||
resource: crdName,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
authorizeDecision: "allow",
|
||||
}, {
|
||||
level: v1beta1.LevelRequestResponse,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/%s", crd.Name),
|
||||
verb: "delete",
|
||||
code: 200,
|
||||
user: auditTestUser,
|
||||
resource: "customresourcedefinitions",
|
||||
requestObject: false,
|
||||
responseObject: true,
|
||||
level: v1beta1.LevelRequestResponse,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/%s", crd.Name),
|
||||
verb: "delete",
|
||||
code: 200,
|
||||
user: auditTestUser,
|
||||
resource: "customresourcedefinitions",
|
||||
requestObject: false,
|
||||
responseObject: true,
|
||||
authorizeDecision: "allow",
|
||||
}, {
|
||||
level: v1beta1.LevelMetadata,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/%s/v1beta1/%s/setup-instance", crdNamespace, crdName),
|
||||
verb: "delete",
|
||||
code: 200,
|
||||
user: auditTestUser,
|
||||
resource: crdName,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
level: v1beta1.LevelMetadata,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/%s/v1beta1/%s/setup-instance", crdNamespace, crdName),
|
||||
verb: "delete",
|
||||
code: 200,
|
||||
user: auditTestUser,
|
||||
resource: crdName,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
authorizeDecision: "allow",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// test authorizer annotations, RBAC is required.
|
||||
annotationTestCases := []struct {
|
||||
action func()
|
||||
events []auditEvent
|
||||
}{
|
||||
|
||||
// get a pod with unauthorized user
|
||||
{
|
||||
func() {
|
||||
_, err := anonymousClient.CoreV1().Pods(namespace).Get("another-audit-pod", metav1.GetOptions{})
|
||||
expectForbidden(err)
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
level: v1beta1.LevelRequest,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/api/v1/namespaces/%s/pods/another-audit-pod", namespace),
|
||||
verb: "get",
|
||||
code: 403,
|
||||
user: auditTestUser,
|
||||
resource: "pods",
|
||||
namespace: namespace,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
authorizeDecision: "forbid",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if framework.IsRBACEnabled(f) {
|
||||
testCases = append(testCases, annotationTestCases...)
|
||||
}
|
||||
expectedEvents := []auditEvent{}
|
||||
for _, t := range testCases {
|
||||
t.action()
|
||||
expectedEvents = append(expectedEvents, t.events...)
|
||||
}
|
||||
|
||||
expectAuditLines(f, expectedEvents)
|
||||
// The default flush timeout is 30 seconds, therefore it should be enough to retry once
|
||||
// to find all expected events. However, we're waiting for 5 minutes to avoid flakes.
|
||||
pollingInterval := 30 * time.Second
|
||||
pollingTimeout := 5 * time.Minute
|
||||
err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
|
||||
ok, err := checkAuditLines(f, expectedEvents)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to observe audit events: %v", err)
|
||||
}
|
||||
return ok, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "after %v failed to observe audit events", pollingTimeout)
|
||||
})
|
||||
})
|
||||
|
||||
type auditEvent struct {
|
||||
level v1beta1.Level
|
||||
stage v1beta1.Stage
|
||||
requestURI string
|
||||
verb string
|
||||
code int32
|
||||
user string
|
||||
resource string
|
||||
namespace string
|
||||
requestObject bool
|
||||
responseObject bool
|
||||
level v1beta1.Level
|
||||
stage v1beta1.Stage
|
||||
requestURI string
|
||||
verb string
|
||||
code int32
|
||||
user string
|
||||
resource string
|
||||
namespace string
|
||||
requestObject bool
|
||||
responseObject bool
|
||||
authorizeDecision string
|
||||
}
|
||||
|
||||
// Search the audit log for the expected audit lines.
|
||||
func expectAuditLines(f *framework.Framework, expected []auditEvent) {
|
||||
func checkAuditLines(f *framework.Framework, expected []auditEvent) (bool, error) {
|
||||
expectations := map[auditEvent]bool{}
|
||||
for _, event := range expected {
|
||||
expectations[event] = false
|
||||
@ -656,25 +750,36 @@ func expectAuditLines(f *framework.Framework, expected []auditEvent) {
|
||||
|
||||
// Fetch the log stream.
|
||||
stream, err := f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/logs/kube-apiserver-audit.log").Stream()
|
||||
framework.ExpectNoError(err, "could not read audit log")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
scanner := bufio.NewScanner(stream)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
event, err := parseAuditLine(line)
|
||||
framework.ExpectNoError(err)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// If the event was expected, mark it as found.
|
||||
if _, found := expectations[event]; found {
|
||||
expectations[event] = true
|
||||
}
|
||||
}
|
||||
framework.ExpectNoError(scanner.Err(), "error reading audit log")
|
||||
|
||||
for event, found := range expectations {
|
||||
Expect(found).To(BeTrue(), "Event %#v not found!", event)
|
||||
if err := scanner.Err(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
noneMissing := true
|
||||
for event, found := range expectations {
|
||||
if !found {
|
||||
framework.Logf("Event %#v not found!", event)
|
||||
}
|
||||
noneMissing = noneMissing && found
|
||||
}
|
||||
return noneMissing, nil
|
||||
}
|
||||
|
||||
func parseAuditLine(line string) (auditEvent, error) {
|
||||
@ -702,5 +807,6 @@ func parseAuditLine(line string) (auditEvent, error) {
|
||||
if e.RequestObject != nil {
|
||||
event.requestObject = true
|
||||
}
|
||||
event.authorizeDecision = e.Annotations["authorization.k8s.io/decision"]
|
||||
return event, nil
|
||||
}
31
vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -74,11 +75,33 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("Getting an existent secret should exit with the Forbidden error", func() {
|
||||
It("Getting an existing secret should exit with the Forbidden error", func() {
|
||||
_, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
|
||||
_, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("Getting an existing configmap should exit with the Forbidden error", func() {
|
||||
By("Create a configmap for testing")
|
||||
configmap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ns,
|
||||
Name: "node-auth-configmap",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data": "content",
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("Getting a secret for a workload the node has access to should succeed", func() {
|
||||
By("Create a secret for testing")
|
||||
secret := &v1.Secret{
|
||||
@ -106,7 +129,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pause",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
NodeName: nodeName,
|
||||
@ -138,7 +161,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("A node shouldn't be able to create an other node", func() {
|
||||
It("A node shouldn't be able to create another node", func() {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@ -151,7 +174,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("A node shouldn't be able to delete an other node", func() {
|
||||
It("A node shouldn't be able to delete another node", func() {
|
||||
By(fmt.Sprintf("Create node foo by user: %v", asUser))
|
||||
err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
19
vendor/k8s.io/kubernetes/test/e2e/auth/pod_security_policy.go
generated
vendored
@ -35,6 +35,7 @@ import (
|
||||
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -315,17 +316,17 @@ func restrictedPod(f *framework.Framework, name string) *v1.Pod {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "docker/default",
|
||||
v1.SeccompPodAnnotationKey: v1.SeccompProfileRuntimeDefault,
|
||||
apparmor.ContainerAnnotationKeyPrefix + "pause": apparmor.ProfileRuntimeDefault,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: boolPtr(false),
|
||||
RunAsUser: intPtr(65534),
|
||||
RunAsUser: utilpointer.Int64Ptr(65534),
|
||||
},
|
||||
}},
|
||||
},
|
||||
@ -373,8 +374,8 @@ func restrictedPSPInPolicy(name string) *policy.PodSecurityPolicy {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{
|
||||
seccomp.AllowedProfilesAnnotationKey: "docker/default",
|
||||
seccomp.DefaultProfileAnnotationKey: "docker/default",
|
||||
seccomp.AllowedProfilesAnnotationKey: v1.SeccompProfileRuntimeDefault,
|
||||
seccomp.DefaultProfileAnnotationKey: v1.SeccompProfileRuntimeDefault,
|
||||
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
},
|
||||
@ -428,8 +429,8 @@ func restrictedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{
|
||||
seccomp.AllowedProfilesAnnotationKey: "docker/default",
|
||||
seccomp.DefaultProfileAnnotationKey: "docker/default",
|
||||
seccomp.AllowedProfilesAnnotationKey: v1.SeccompProfileRuntimeDefault,
|
||||
seccomp.DefaultProfileAnnotationKey: v1.SeccompProfileRuntimeDefault,
|
||||
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
},
|
||||
@ -480,7 +481,3 @@ func restrictedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
|
||||
func boolPtr(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
func intPtr(i int64) *int64 {
|
||||
return &i
|
||||
}
5
vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD
generated
vendored
@@ -11,7 +11,7 @@ go_library(
"autoscaling_timer.go",
"cluster_autoscaler_scalability.go",
"cluster_size_autoscaling.go",
- "custom_metrics_autoscaling.go",
+ "custom_metrics_stackdriver_autoscaling.go",
"dns_autoscaling.go",
"framework.go",
"horizontal_pod_autoscaling.go",
@@ -24,6 +24,7 @@ go_library(
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
+ "//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
@@ -33,7 +34,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
- "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
+ "//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
2
vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go
generated
vendored
@@ -93,7 +93,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
replicas := 1
- resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
+ resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset, f.ScalesGetter)
defer resourceConsumer.CleanUp()
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
13
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go
generated
vendored
@ -31,6 +31,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -347,7 +348,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
|
||||
timeToWait := 5 * time.Minute
|
||||
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
|
||||
framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, podsConfig.Name)
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
|
||||
|
||||
// Ensure that no new nodes have been added so far.
|
||||
Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
|
||||
@ -417,7 +418,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
|
||||
}
|
||||
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, config.extraPods.Name)
|
||||
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
|
||||
}
|
||||
}
|
||||
|
||||
@ -432,7 +433,7 @@ func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabyte
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: timeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: replicas,
|
||||
MemRequest: int64(1024 * 1024 * megabytes / replicas),
|
||||
}
|
||||
@ -492,7 +493,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: timeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: replicas,
|
||||
HostPorts: map[string]int{"port1": port},
|
||||
MemRequest: request,
|
||||
@ -500,7 +501,7 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
|
||||
err := framework.RunRC(*config)
|
||||
framework.ExpectNoError(err)
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
|
||||
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
|
||||
}
|
||||
}
|
||||
|
||||
@ -540,7 +541,7 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
|
||||
framework.ExpectNoError(framework.RunRC(*rcConfig))
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
|
||||
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
|
||||
}
|
||||
}
|
||||
217
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go
generated
vendored
@ -30,7 +30,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/api/scheduling/v1alpha1"
|
||||
schedulerapi "k8s.io/api/scheduling/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
@ -45,6 +45,7 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/scheduling"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@ -168,7 +169,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
By("Creating unschedulable pod")
|
||||
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
By("Waiting for scale up hoping it won't happen")
|
||||
// Verify that the appropriate event was generated
|
||||
@ -195,7 +196,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
simpleScaleUpTest := func(unready int) {
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Verify that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
|
||||
@ -206,6 +207,108 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
|
||||
func() { simpleScaleUpTest(0) })
|
||||
|
||||
supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
for _, gpuType := range supportedGpuTypes {
gpuType := gpuType // create new variable for each iteration step

It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))

By("Schedule a pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})

It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()

By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))

By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})

It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))

By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})

It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()

By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))

By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
}

It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
|
||||
func() {
|
||||
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleUpTest(1) })
|
||||
@ -222,7 +325,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
By("Schedule more pods than can fit and wait for cluster to scale-up")
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
|
||||
return s.status == caOngoingScaleUpStatus
|
||||
@ -265,8 +368,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
|
||||
totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Verify, that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
@ -289,7 +392,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
|
||||
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
|
||||
@ -304,12 +407,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
}
|
||||
By("starting a pod with anti-affinity on each node")
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
By("scheduling extra pods with anti-affinity to existing ones")
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
|
||||
@ -323,14 +426,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
"anti-affinity": "yes",
|
||||
}
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
|
||||
By("waiting for all pods before triggering scale up")
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
By("creating a pod requesting EmptyDir")
|
||||
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
|
||||
@ -359,11 +462,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
},
|
||||
Prebind: nil,
|
||||
}
|
||||
emptyStorageClass := ""
|
||||
pvcConfig := framework.PersistentVolumeClaimConfig{
|
||||
Annotations: map[string]string{
|
||||
v1.BetaStorageClassAnnotation: "",
|
||||
},
|
||||
Selector: selector,
|
||||
Selector: selector,
|
||||
StorageClassName: &emptyStorageClass,
|
||||
}
|
||||
|
||||
pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
|
||||
@ -388,7 +490,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
}
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer func() {
|
||||
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
glog.Infof("RC and pods not using volume deleted")
|
||||
}()
|
||||
|
||||
@ -401,7 +503,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
volumes := buildVolumes(pv, pvc)
|
||||
framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
|
||||
defer func() {
|
||||
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, pvcPodName)
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
}()
|
||||
|
||||
@ -506,7 +608,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
defer removeLabels(registeredNodes)
|
||||
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector"))
|
||||
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
|
||||
})
|
||||
|
||||
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
@ -524,8 +626,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
extraPods := extraNodes + 1
|
||||
totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
|
||||
By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Apparently GKE master is restarted couple minutes after the node pool is added
|
||||
// reseting all the timers in scale down code. Adding 5 extra minutes to workaround
|
||||
@ -663,7 +765,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
|
||||
By("Run a scale-up test")
|
||||
ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
|
||||
// Verify that cluster size is increased
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
@ -776,7 +878,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
|
||||
} else {
|
||||
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
|
||||
time.Sleep(scaleUpTimeout)
|
||||
currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
|
||||
@ -957,6 +1059,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
})

func installNvidiaDriversDaemonSet() {
By("Add daemonset which installs nvidia drivers")
// the link differs from one in GKE documentation; discussed with @mindprince this one should be used
framework.RunKubectlOrDie("apply", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/daemonset.yaml")
}

func execCmd(args ...string) *exec.Cmd {
glog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
@ -974,7 +1082,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
|
||||
labelMap := map[string]string{"test_id": testID}
|
||||
framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
|
||||
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, "reschedulable-pods")
|
||||
defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
|
||||
|
||||
By("Create a PodDisruptionBudget")
|
||||
minAvailable := intstr.FromInt(numPods - pdbSize)
|
||||
@ -1300,20 +1408,37 @@ func addNodePool(name string, machineType string, numNodes int) {
|
||||
framework.ExpectNoError(err, string(output))
|
||||
}
|
||||
|
||||
func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
args := []string{"beta", "container", "node-pools", "create", name, "--quiet",
"--accelerator", "type=" + gpuType + ",count=" + strconv.Itoa(gpuCount),
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}

func deleteNodePool(name string) {
glog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Infof("Error: %v", err)
}
glog.Infof("Node-pool deletion output: %s", output)
err := wait.ExponentialBackoff(
wait.Backoff{Duration: 1 * time.Minute, Factor: float64(3), Steps: 3},
func() (bool, error) {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
return false, nil
}
glog.Infof("Node-pool deletion output: %s", output)
return true, nil
})
framework.ExpectNoError(err)
}

func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
nodes := make([]*v1.Node, 0, 1)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeList := framework.GetReadyNodesIncludingTaintedOrDie(f.ClientSet)
for _, node := range nodeList.Items {
if node.Labels[gkeNodepoolNameKey] == poolName {
nodes = append(nodes, &node)
@ -1388,7 +1513,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: timeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: replicas,
|
||||
MemRequest: request,
|
||||
NodeSelector: selector,
|
||||
@ -1404,7 +1529,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
|
||||
return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
|
||||
}
|
||||
}
|
||||
framework.Failf("Failed to reserve memory within timeout")
|
||||
@ -1617,6 +1742,26 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
|
||||
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
|
||||
}
|
||||
|
||||
func scheduleGpuPod(f *framework.Framework, id string) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: 1,
Labels: map[string]string{"requires-gpu": "yes"},
}

err := framework.RunRC(*config)
if err != nil {
return err
}
return nil
}

// Create an RC running a given number of pods with anti-affinity
func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
config := &testutils.RCConfig{
@ -1626,7 +1771,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
|
||||
Name: id,
|
||||
Namespace: namespace,
|
||||
Timeout: scaleUpTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: pods,
|
||||
Labels: podLabels,
|
||||
}
|
||||
@ -1650,7 +1795,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
|
||||
Name: id,
|
||||
Namespace: namespace,
|
||||
Timeout: scaleUpTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: pods,
|
||||
Labels: podLabels,
|
||||
}
|
||||
@ -1731,7 +1876,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
|
||||
Name: id,
|
||||
Namespace: namespace,
|
||||
Timeout: defaultTimeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: 0,
|
||||
Labels: labels,
|
||||
MemRequest: memRequest,
|
||||
@ -1790,7 +1935,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
|
||||
func runReplicatedPodOnEachNodeWithCleanup(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) (func(), error) {
|
||||
err := runReplicatedPodOnEachNode(f, nodes, namespace, podsPerNode, id, labels, memRequest)
|
||||
return func() {
|
||||
framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, id)
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, id)
|
||||
}, err
|
||||
}
|
||||
|
||||
@ -1853,7 +1998,7 @@ type scaleUpStatus struct {
|
||||
// Try to get timestamp from status.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getStatusTimestamp(status string) (time.Time, error) {
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+):")
timestampMatcher, err := regexp.Compile("Cluster-autoscaler status at \\s*([0-9\\-]+ [0-9]+:[0-9]+:[0-9]+\\.[0-9]+ \\+[0-9]+ [A-Za-z]+)")
if err != nil {
return time.Time{}, err
}
@ -2006,13 +2151,13 @@ func createPriorityClasses(f *framework.Framework) func() {
|
||||
highPriorityClassName: 1000,
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1alpha1().PriorityClasses().Create(&v1alpha1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
}

return func() {
for className := range priorityClasses {
f.ClientSet.SchedulingV1alpha1().PriorityClasses().Delete(className, nil)
f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
}
}
}
271
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go
generated
vendored
271
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go
generated
vendored
@ -1,271 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
as "k8s.io/api/autoscaling/v2beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
const (
|
||||
stackdriverExporterDeployment = "stackdriver-exporter-deployment"
|
||||
dummyDeploymentName = "dummy-deployment"
|
||||
stackdriverExporterPod = "stackdriver-exporter-pod"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
scaledReplicas := 1
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
deployment := monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
|
||||
customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
scaledReplicas := 1
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
deployment := monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
|
||||
pod := monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue)
|
||||
customMetricTest(f, f.ClientSet, objectHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, pod, initialReplicas, scaledReplicas)
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
scaledReplicas := 1
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
deployment := monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue)
|
||||
customMetricTest(f, f.ClientSet, simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget), deployment, nil, initialReplicas, scaledReplicas)
|
||||
})
|
||||
|
||||
It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
scaledReplicas := 3
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := int64(100)
|
||||
metric1Target := 2 * metric1Value
|
||||
// metric2 should cause a scale up
|
||||
metric2Value := int64(200)
|
||||
metric2Target := int64(0.5 * float64(metric2Value))
|
||||
containers := []monitoring.CustomMetricContainerSpec{
|
||||
{
|
||||
Name: "stackdriver-exporter-metric1",
|
||||
MetricName: "metric1",
|
||||
MetricValue: metric1Value,
|
||||
},
|
||||
{
|
||||
Name: "stackdriver-exporter-metric2",
|
||||
MetricName: "metric2",
|
||||
MetricValue: metric2Value,
|
||||
},
|
||||
}
|
||||
metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target}
|
||||
deployment := monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers)
|
||||
customMetricTest(f, f.ClientSet, podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets), deployment, nil, initialReplicas, scaledReplicas)
|
||||
})
|
||||
})
|
||||
|
||||
func customMetricTest(f *framework.Framework, kubeClient clientset.Interface, hpa *as.HorizontalPodAutoscaler,
|
||||
deployment *extensions.Deployment, pod *corev1.Pod, initialReplicas, scaledReplicas int) {
|
||||
projectId := framework.TestContext.CloudConfig.ProjectID
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
|
||||
|
||||
// Hack for running tests locally, needed to authenticate in Stackdriver
|
||||
// If this is your use case, create application default credentials:
|
||||
// $ gcloud auth application-default login
|
||||
// and uncomment following lines:
|
||||
/*
|
||||
ts, err := google.DefaultTokenSource(oauth2.NoContext)
|
||||
framework.Logf("Couldn't get application default credentials, %v", err)
|
||||
if err != nil {
|
||||
framework.Failf("Error accessing application default credentials, %v", err)
|
||||
}
|
||||
client := oauth2.NewClient(oauth2.NoContext, ts)
|
||||
*/
|
||||
|
||||
gcmService, err := gcm.New(client)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create gcm service, %v", err)
|
||||
}
|
||||
|
||||
// Set up a cluster: create a custom metric and set up k8s-sd adapter
|
||||
err = monitoring.CreateDescriptors(gcmService, projectId)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create metric descriptor: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectId)
|
||||
|
||||
err = monitoring.CreateAdapter()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupAdapter()
|
||||
|
||||
// Run application that exports the metric
|
||||
err = createDeploymentToScale(f, kubeClient, deployment, pod)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
|
||||
}
|
||||
defer cleanupDeploymentsToScale(f, kubeClient, deployment, pod)
|
||||
|
||||
// Wait for the deployment to run
|
||||
waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, initialReplicas)
|
||||
|
||||
// Autoscale the deployment
|
||||
_, err = kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(hpa)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create HPA: %v", err)
|
||||
}
|
||||
|
||||
waitForReplicas(deployment.ObjectMeta.Name, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, scaledReplicas)
|
||||
}
|
||||
|
||||
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error {
|
||||
if deployment != nil {
|
||||
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if pod != nil {
|
||||
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) {
|
||||
if deployment != nil {
|
||||
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
if pod != nil {
|
||||
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
}
|
||||
|
||||
func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget})
|
||||
}
|
||||
|
||||
func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
metrics := []as.MetricSpec{}
|
||||
for metric, target := range metricTargets {
|
||||
metrics = append(metrics, as.MetricSpec{
|
||||
Type: as.PodsMetricSourceType,
|
||||
Pods: &as.PodsMetricSource{
|
||||
MetricName: metric,
|
||||
TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI),
|
||||
},
|
||||
})
|
||||
}
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-pods-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: metrics,
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: deploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-objects-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: []as.MetricSpec{
|
||||
{
|
||||
Type: as.ObjectMetricSourceType,
|
||||
Object: &as.ObjectMetricSource{
|
||||
MetricName: monitoring.CustomMetricName,
|
||||
Target: as.CrossVersionObjectReference{
|
||||
Kind: "Pod",
|
||||
Name: stackdriverExporterPod,
|
||||
},
|
||||
TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: dummyDeploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
|
||||
interval := 20 * time.Second
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
|
||||
}
|
||||
replicas := int(deployment.Status.ReadyReplicas)
|
||||
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
|
||||
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
|
||||
})
|
||||
if err != nil {
|
||||
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
|
||||
}
|
||||
}
451
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
generated
vendored
Normal file
451
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
generated
vendored
Normal file
@ -0,0 +1,451 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
as "k8s.io/api/autoscaling/v2beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
const (
|
||||
stackdriverExporterDeployment = "stackdriver-exporter-deployment"
|
||||
dummyDeploymentName = "dummy-deployment"
|
||||
stackdriverExporterPod = "stackdriver-exporter-pod"
|
||||
externalMetricValue = int64(85)
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue),
|
||||
hpa: simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
// Metric exported by deployment is ignored
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
|
||||
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue),
|
||||
hpa: objectHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with External Metric with target value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
metricTarget := 2 * metricValue
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"target": {
|
||||
value: metricTarget,
|
||||
isAverage: false,
|
||||
},
|
||||
}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
// Metric exported by deployment is ignored
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
|
||||
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target", metricValue),
|
||||
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with External Metric with target average value from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
metricAverageTarget := 2 * metricValue
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"target_average": {
|
||||
value: metricAverageTarget,
|
||||
isAverage: true,
|
||||
},
|
||||
}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
// Metric exported by deployment is ignored
|
||||
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
|
||||
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, "target_average", externalMetricValue),
|
||||
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale down with Custom Metric of type Pod from Stackdriver with Prometheus [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := int64(100)
|
||||
metricTarget := 2 * metricValue
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 1,
|
||||
deployment: monitoring.PrometheusExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), metricValue),
|
||||
hpa: simplePodsHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale up with two metrics of type Pod from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := int64(100)
|
||||
metric1Target := 2 * metric1Value
|
||||
// metric2 should cause a scale up
|
||||
metric2Value := int64(200)
|
||||
metric2Target := int64(0.5 * float64(metric2Value))
|
||||
containers := []monitoring.CustomMetricContainerSpec{
|
||||
{
|
||||
Name: "stackdriver-exporter-metric1",
|
||||
MetricName: "metric1",
|
||||
MetricValue: metric1Value,
|
||||
},
|
||||
{
|
||||
Name: "stackdriver-exporter-metric2",
|
||||
MetricName: "metric2",
|
||||
MetricValue: metric2Value,
|
||||
},
|
||||
}
|
||||
metricTargets := map[string]int64{"metric1": metric1Target, "metric2": metric2Target}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 3,
|
||||
deployment: monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
|
||||
hpa: podsHPA(f.Namespace.ObjectMeta.Name, stackdriverExporterDeployment, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
|
||||
It("should scale up with two External metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
initialReplicas := 1
|
||||
// metric 1 would cause a scale down, if not for metric 2
|
||||
metric1Value := externalMetricValue
|
||||
metric1Target := 2 * metric1Value
|
||||
// metric2 should cause a scale up
|
||||
metric2Value := externalMetricValue
|
||||
metric2Target := int64(math.Ceil(0.5 * float64(metric2Value)))
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"external_metric_1": {
|
||||
value: metric1Target,
|
||||
isAverage: false,
|
||||
},
|
||||
"external_metric_2": {
|
||||
value: metric2Target,
|
||||
isAverage: false,
|
||||
},
|
||||
}
|
||||
containers := []monitoring.CustomMetricContainerSpec{
|
||||
{
|
||||
Name: "stackdriver-exporter-metric1",
|
||||
MetricName: "external_metric_1",
|
||||
MetricValue: metric1Value,
|
||||
},
|
||||
{
|
||||
Name: "stackdriver-exporter-metric2",
|
||||
MetricName: "external_metric_2",
|
||||
MetricValue: metric2Value,
|
||||
},
|
||||
}
|
||||
tc := CustomMetricTestCase{
|
||||
framework: f,
|
||||
kubeClient: f.ClientSet,
|
||||
initialReplicas: initialReplicas,
|
||||
scaledReplicas: 3,
|
||||
deployment: monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), containers),
|
||||
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
|
||||
tc.Run()
|
||||
})
|
||||
})
|
||||
|
||||
type CustomMetricTestCase struct {
framework *framework.Framework
hpa *as.HorizontalPodAutoscaler
kubeClient clientset.Interface
deployment *extensions.Deployment
pod *corev1.Pod
initialReplicas int
scaledReplicas int
}

func (tc *CustomMetricTestCase) Run() {
|
||||
projectId := framework.TestContext.CloudConfig.ProjectID
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
|
||||
|
||||
// Hack for running tests locally, needed to authenticate in Stackdriver
|
||||
// If this is your use case, create application default credentials:
|
||||
// $ gcloud auth application-default login
|
||||
// and uncomment following lines:
|
||||
/*
|
||||
ts, err := google.DefaultTokenSource(oauth2.NoContext)
|
||||
framework.Logf("Couldn't get application default credentials, %v", err)
|
||||
if err != nil {
|
||||
framework.Failf("Error accessing application default credentials, %v", err)
|
||||
}
|
||||
client := oauth2.NewClient(oauth2.NoContext, ts)
|
||||
*/
|
||||
|
||||
gcmService, err := gcm.New(client)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create gcm service, %v", err)
|
||||
}
|
||||
|
||||
// Set up a cluster: create a custom metric and set up k8s-sd adapter
|
||||
err = monitoring.CreateDescriptors(gcmService, projectId)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create metric descriptor: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectId)
|
||||
|
||||
err = monitoring.CreateAdapter(monitoring.AdapterDefault)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupAdapter(monitoring.AdapterDefault)
|
||||
|
||||
// Run application that exports the metric
|
||||
err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
|
||||
}
|
||||
defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
|
||||
|
||||
// Wait for the deployment to run
|
||||
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas)
|
||||
|
||||
// Autoscale the deployment
|
||||
_, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create HPA: %v", err)
|
||||
}
|
||||
defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
|
||||
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)
|
||||
}
|
||||
|
||||
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) error {
|
||||
if deployment != nil {
|
||||
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if pod != nil {
|
||||
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *extensions.Deployment, pod *corev1.Pod) {
|
||||
if deployment != nil {
|
||||
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
if pod != nil {
|
||||
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
|
||||
}
|
||||
}
|
||||
|
||||
func simplePodsHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
return podsHPA(namespace, stackdriverExporterDeployment, map[string]int64{monitoring.CustomMetricName: metricTarget})
|
||||
}
|
||||
|
||||
func podsHPA(namespace string, deploymentName string, metricTargets map[string]int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
metrics := []as.MetricSpec{}
|
||||
for metric, target := range metricTargets {
|
||||
metrics = append(metrics, as.MetricSpec{
|
||||
Type: as.PodsMetricSourceType,
|
||||
Pods: &as.PodsMetricSource{
|
||||
MetricName: metric,
|
||||
TargetAverageValue: *resource.NewQuantity(target, resource.DecimalSI),
|
||||
},
|
||||
})
|
||||
}
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-pods-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: metrics,
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: deploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func objectHPA(namespace string, metricTarget int64) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
return &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-objects-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: []as.MetricSpec{
|
||||
{
|
||||
Type: as.ObjectMetricSourceType,
|
||||
Object: &as.ObjectMetricSource{
|
||||
MetricName: monitoring.CustomMetricName,
|
||||
Target: as.CrossVersionObjectReference{
|
||||
Kind: "Pod",
|
||||
Name: stackdriverExporterPod,
|
||||
},
|
||||
TargetValue: *resource.NewQuantity(metricTarget, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: dummyDeploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type externalMetricTarget struct {
value int64
isAverage bool
}

func externalHPA(namespace string, metricTargets map[string]externalMetricTarget) *as.HorizontalPodAutoscaler {
|
||||
var minReplicas int32 = 1
|
||||
metricSpecs := []as.MetricSpec{}
|
||||
selector := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"resource.type": "gke_container"},
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "resource.labels.namespace_id",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
// TODO(bskiba): change default to real namespace name once it is available
|
||||
// from Stackdriver.
|
||||
Values: []string{"default", "dummy"},
|
||||
},
|
||||
{
|
||||
Key: "resource.labels.pod_id",
|
||||
Operator: metav1.LabelSelectorOpExists,
|
||||
Values: []string{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for metric, target := range metricTargets {
|
||||
var metricSpec as.MetricSpec
|
||||
metricSpec = as.MetricSpec{
|
||||
Type: as.ExternalMetricSourceType,
|
||||
External: &as.ExternalMetricSource{
|
||||
MetricName: "custom.googleapis.com|" + metric,
|
||||
MetricSelector: selector,
|
||||
},
|
||||
}
|
||||
if target.isAverage {
|
||||
metricSpec.External.TargetAverageValue = resource.NewQuantity(target.value, resource.DecimalSI)
|
||||
} else {
|
||||
metricSpec.External.TargetValue = resource.NewQuantity(target.value, resource.DecimalSI)
|
||||
}
|
||||
metricSpecs = append(metricSpecs, metricSpec)
|
||||
}
|
||||
hpa := &as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-external-hpa",
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: metricSpecs,
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: dummyDeploymentName,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return hpa
|
||||
}
|
||||
|
||||
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := cs.ExtensionsV1beta1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
}
replicas := int(deployment.Status.ReadyReplicas)
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
if err != nil {
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
}
}
12
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go
generated
vendored
12
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go
generated
vendored
@ -99,9 +99,11 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
// This test is separated because it is slow and need to run serially.
// Will take around 5 minutes to run on a 4 nodes cluster.
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
numNodes, err := framework.NumberOfRegisteredNodes(c)
Expect(err).NotTo(HaveOccurred())

By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring initial dns autoscaling parameters")
@ -117,25 +119,21 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
originalSizes := make(map[string]int)
|
||||
sum := 0
|
||||
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
|
||||
size, err := framework.GroupSize(mig)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
|
||||
originalSizes[mig] = size
|
||||
sum += size
|
||||
}
|
||||
|
||||
By("Manually increase cluster size")
|
||||
increasedSize := 0
|
||||
increasedSizes := make(map[string]int)
|
||||
for key, val := range originalSizes {
|
||||
increasedSizes[key] = val + 1
|
||||
increasedSize += increasedSizes[key]
|
||||
}
|
||||
setMigSizes(increasedSizes)
|
||||
Expect(WaitForClusterSizeFunc(c,
|
||||
func(size int) bool { return size == increasedSize }, scaleUpTimeout)).NotTo(HaveOccurred())
|
||||
func(size int) bool { return size == numNodes+len(originalSizes) }, scaleUpTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
@ -151,7 +149,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
|
||||
By("Restoring cluster size")
|
||||
setMigSizes(originalSizes)
|
||||
Expect(framework.WaitForReadyNodes(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
|
||||
Expect(framework.WaitForReadyNodes(c, numNodes, scaleDownTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
2
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
@ -116,7 +116,7 @@ type HPAScaleTest struct {
|
||||
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
|
||||
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
|
||||
const timeToWait = 15 * time.Minute
|
||||
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
|
||||
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
|
||||
defer rc.CleanUp()
|
||||
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
|
||||
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e/common/BUILD
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/common/BUILD
generated
vendored
@ -34,13 +34,12 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/common",
|
||||
deps = [
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/client/conditions:go_default_library",
|
||||
"//pkg/kubelet:go_default_library",
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/kubelet/sysctl:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
@ -65,6 +64,7 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/scale:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
|
39
vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go
generated
vendored
39
vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go
generated
vendored
@ -36,6 +36,7 @@ import (
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -86,6 +87,7 @@ type ResourceConsumer struct {
|
||||
nsName string
|
||||
clientSet clientset.Interface
|
||||
internalClientset *internalclientset.Clientset
|
||||
scaleClient scaleclient.ScalesGetter
|
||||
cpu chan int
|
||||
mem chan int
|
||||
customMetric chan int
|
||||
@ -104,15 +106,20 @@ func GetResourceConsumerImage() string {
|
||||
return resourceConsumerImage
|
||||
}
|
||||
|
||||
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
|
||||
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
|
||||
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
|
||||
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset)
|
||||
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset, scaleClient, nil, nil)
|
||||
}
|
||||
|
||||
// TODO this still defaults to replication controller
|
||||
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
|
||||
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
|
||||
return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
|
||||
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset)
|
||||
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset, scaleClient, nil, nil)
|
||||
}
|
||||
|
||||
func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
|
||||
return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds,
|
||||
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, internalClientset, scaleClient, podAnnotations, serviceAnnotations)
|
||||
}
|
||||
|
||||
/*
|
||||
@ -123,9 +130,14 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c
|
||||
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
|
||||
*/
|
||||
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
|
||||
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
|
||||
|
||||
runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
|
||||
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer {
|
||||
if podAnnotations == nil {
|
||||
podAnnotations = make(map[string]string)
|
||||
}
|
||||
if serviceAnnotations == nil {
|
||||
serviceAnnotations = make(map[string]string)
|
||||
}
|
||||
runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations)
|
||||
rc := &ResourceConsumer{
|
||||
name: name,
|
||||
controllerName: name + "-ctrl",
|
||||
@ -133,6 +145,7 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl
|
||||
nsName: nsName,
|
||||
clientSet: clientset,
|
||||
internalClientset: internalClientset,
|
||||
scaleClient: scaleClient,
|
||||
cpu: make(chan int),
|
||||
mem: make(chan int),
|
||||
customMetric: make(chan int),
|
||||
@ -224,7 +237,7 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
|
||||
delta := 0
|
||||
for {
|
||||
select {
|
||||
case delta := <-rc.customMetric:
|
||||
case delta = <-rc.customMetric:
|
||||
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
|
||||
case <-time.After(sleepTime):
|
||||
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
|
||||
@ -401,17 +414,18 @@ func (rc *ResourceConsumer) CleanUp() {
|
||||
// Wait some time to ensure all child goroutines are finished.
|
||||
time.Sleep(10 * time.Second)
|
||||
kind := rc.kind.GroupKind()
|
||||
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name))
|
||||
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
|
||||
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
|
||||
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
|
||||
}
|
||||
|
||||
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64) {
|
||||
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
|
||||
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
|
||||
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Name: name,
|
||||
Annotations: serviceAnnotations,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
@ -438,6 +452,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalCli
|
||||
CpuLimit: cpuLimitMillis,
|
||||
MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
|
||||
MemLimit: memLimitMb * 1024 * 1024,
|
||||
Annotations: podAnnotations,
|
||||
}
|
||||
|
||||
switch kind {
|
||||
|
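Illustrative sketch (not part of the vendored diff): the autoscaling_utils.go hunks above add a scaleclient.ScalesGetter parameter to the ResourceConsumer constructors, so a caller now passes a scale client alongside the two clientsets. The helper name, argument values, and import paths below are assumptions for illustration only.

package e2esketch

import (
	clientset "k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"
	internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/common"
)

// startConsumer shows the updated 12-argument signature: name, namespace, kind,
// replicas, initCPUTotal, initMemoryTotal, initCustomMetric, cpuLimit, memLimit,
// clientset, internal clientset, and the newly required scale client.
func startConsumer(ns string, cs clientset.Interface, ic *internalclientset.Clientset, sc scaleclient.ScalesGetter) *common.ResourceConsumer {
	return common.NewDynamicResourceConsumer("consumer", ns, common.KindDeployment,
		1, 250, 0, 0, 500, 200, cs, ic, sc)
}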
4
vendor/k8s.io/kubernetes/test/e2e/common/configmap.go
generated
vendored
@ -34,7 +34,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Description: Make sure config map value can be used as an environment
variable in the container (on container.env field)
*/
framework.ConformanceIt("should be consumable via environment variable ", func() {
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
@ -82,7 +82,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Description: Make sure config map value can be used as an source for
environment variables in the container (on container.envFrom field)
*/
framework.ConformanceIt("should be consumable via the environment ", func() {
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newEnvFromConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
|
26
vendor/k8s.io/kubernetes/test/e2e/common/configmap_volume.go
generated
vendored
@ -37,7 +37,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure config map without mappings works by mounting it
|
||||
to a volume with a custom path (mapping) on the pod with no other settings.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
@ -46,12 +46,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure config map without mappings works by mounting it
|
||||
to a volume with a custom path (mapping) on the pod with defaultMode set
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Feature:FSGroup]", func() {
|
||||
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
doConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
|
||||
})
|
||||
@ -61,11 +61,11 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure config map without mappings works by mounting it
|
||||
to a volume with a custom path (mapping) on the pod as non-root.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
|
||||
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup]", func() {
|
||||
It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
|
||||
doConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
@ -75,7 +75,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
a custom path (mapping) on the pod with no other settings and make sure
|
||||
the pod actually consumes it.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doConfigMapE2EWithMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
@ -84,7 +84,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure config map works with an item mode (e.g. 0400)
|
||||
for the config map item.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doConfigMapE2EWithMappings(f, 0, 0, &mode)
|
||||
})
|
||||
@ -93,11 +93,11 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Testname: configmap-simple-user-mapped
|
||||
Description: Make sure config map works when it is mounted as non-root.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
|
||||
doConfigMapE2EWithMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup]", func() {
|
||||
It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
|
||||
doConfigMapE2EWithMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
@ -106,7 +106,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure update operation is working on config map and
|
||||
the result is observed on volumes mounted in containers.
|
||||
*/
|
||||
framework.ConformanceIt("updates should be reflected in volume ", func() {
|
||||
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
@ -184,7 +184,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
|
||||
})
|
||||
|
||||
It("binary data should be reflected in volume ", func() {
|
||||
It("binary data should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
@ -280,7 +280,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure Create, Update, Delete operations are all working
|
||||
on config map and the result is observed on volumes mounted in containers.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume ", func() {
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
@ -463,7 +463,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
Description: Make sure config map works when it mounted as two different
|
||||
volumes on the same node.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod ", func() {
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
|
||||
var (
|
||||
name = "configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
|
18
vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go
generated
vendored
@ -37,7 +37,7 @@ const (
|
||||
probTestContainerName = "test-webserver"
|
||||
probTestInitialDelaySeconds = 15
|
||||
|
||||
defaultObservationTimeout = time.Minute * 2
|
||||
defaultObservationTimeout = time.Minute * 4
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Probing container", func() {
|
||||
@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure that pod with readiness probe should not be
|
||||
ready before initial delay and never restart.
|
||||
*/
|
||||
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart ", func() {
|
||||
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
|
||||
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
|
||||
f.WaitForPodReady(p.Name)
|
||||
|
||||
@ -86,7 +86,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure that pod with readiness probe that fails should
|
||||
never be ready and never restart.
|
||||
*/
|
||||
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart ", func() {
|
||||
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
|
||||
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
|
||||
Consistently(func() (bool, error) {
|
||||
p, err := podClient.Get(p.Name, metav1.GetOptions{})
|
||||
@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure the pod is restarted with a cat /tmp/health
|
||||
liveness probe.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe", func() {
|
||||
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure the pod is not restarted with a cat /tmp/health
|
||||
liveness probe.
|
||||
*/
|
||||
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe", func() {
|
||||
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
@ -175,7 +175,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure when http liveness probe fails, the pod should
|
||||
be restarted.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a /healthz http liveness probe ", func() {
|
||||
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
@ -209,7 +209,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure when a pod gets restarted, its start count
|
||||
should increase.
|
||||
*/
|
||||
framework.ConformanceIt("should have monotonically increasing restart count [Slow]", func() {
|
||||
framework.ConformanceIt("should have monotonically increasing restart count [Slow][NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
@ -242,7 +242,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure when http liveness probe succeeds, the pod
|
||||
should not be restarted.
|
||||
*/
|
||||
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe ", func() {
|
||||
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
@ -276,7 +276,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
|
||||
Description: Make sure that the pod is restarted with a docker exec
|
||||
liveness probe with timeout.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a docker exec liveness probe with timeout ", func() {
|
||||
It("should be restarted with a docker exec liveness probe with timeout ", func() {
|
||||
// TODO: enable this test once the default exec handler supports timeout.
|
||||
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
|
8
vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go
generated
vendored
@ -33,7 +33,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
provided for a Container, ensure that the docker image's default
command and args are used.
*/
framework.ConformanceIt("should use the image defaults if command and args are blank ", func() {
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
"[/ep default arguments]",
})
@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
Container, ensure that they take precedent to the docker image's
default arguments, but that the default command is used.
*/
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) ", func() {
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"}

@ -62,7 +62,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
Container, ensure that it takes precedent to the docker image's default
command.
*/
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) ", func() {
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}

@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
provided for a Container, ensure that they take precedent to the docker
image's default command and arguments.
*/
framework.ConformanceIt("should be able to override the image's default command and arguments ", func() {
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
|
12
vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go
generated
vendored
@ -42,7 +42,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
Description: Ensure that downward API can provide pod's name, namespace
|
||||
and IP address as environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars ", func() {
|
||||
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
@ -88,7 +88,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
Description: Ensure that downward API can provide an IP address for
|
||||
host node as an environment variable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide host IP as an env var ", func() {
|
||||
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
|
||||
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
@ -115,7 +115,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
Description: Ensure that downward API can provide CPU/memory limit
|
||||
and CPU/memory request as environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars ", func() {
|
||||
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
@ -167,7 +167,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
allocatable values for CPU and memory as environment variables if CPU
|
||||
and memory limits are not specified for a container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable ", func() {
|
||||
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
@ -217,7 +217,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
Description: Ensure that downward API can provide pod UID as an
|
||||
environment variable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide pod UID as env vars ", func() {
|
||||
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
|
||||
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
@ -240,7 +240,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
})
|
||||
})
|
||||
|
||||
var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive]", func() {
|
||||
var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:EphemeralStorage]", func() {
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
|
||||
Context("Downward API tests for local ephemeral storage", func() {
|
||||
|
26
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go
generated
vendored
@ -44,7 +44,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can provide pod's name through
|
||||
DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide podname only ", func() {
|
||||
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
|
||||
|
||||
@ -58,7 +58,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can set default file permission
|
||||
mode for DownwardAPIVolumeFiles if no mode is specified.
|
||||
*/
|
||||
framework.ConformanceIt("should set DefaultMode on files ", func() {
|
||||
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
defaultMode := int32(0400)
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
|
||||
@ -73,7 +73,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can set file permission mode for
|
||||
DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should set mode on item file ", func() {
|
||||
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
mode := int32(0400)
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
|
||||
@ -83,7 +83,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
})
|
||||
|
||||
It("should provide podname as non-root with fsgroup [Feature:FSGroup]", func() {
|
||||
It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
uid := int64(1001)
|
||||
gid := int64(1234)
|
||||
@ -97,7 +97,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
})
|
||||
|
||||
It("should provide podname as non-root with fsgroup and defaultMode [Feature:FSGroup]", func() {
|
||||
It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
uid := int64(1001)
|
||||
gid := int64(1234)
|
||||
@ -117,7 +117,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API updates labels in
|
||||
DownwardAPIVolumeFiles when pod's labels get modified.
|
||||
*/
|
||||
framework.ConformanceIt("should update labels on modification ", func() {
|
||||
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
|
||||
labels := map[string]string{}
|
||||
labels["key1"] = "value1"
|
||||
labels["key2"] = "value2"
|
||||
@ -149,7 +149,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API updates annotations in
|
||||
DownwardAPIVolumeFiles when pod's annotations get modified.
|
||||
*/
|
||||
framework.ConformanceIt("should update annotations on modification ", func() {
|
||||
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
|
||||
annotations := map[string]string{}
|
||||
annotations["builder"] = "bar"
|
||||
podName := "annotationupdate" + string(uuid.NewUUID())
|
||||
@ -183,7 +183,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can provide container's CPU limit
|
||||
through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu limit ", func() {
|
||||
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
@ -197,7 +197,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
limit through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory limit ", func() {
|
||||
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
@ -211,7 +211,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can provide container's CPU
|
||||
request through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu request ", func() {
|
||||
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
|
||||
|
||||
@ -225,7 +225,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
request through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory request ", func() {
|
||||
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
|
||||
|
||||
@ -240,7 +240,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
|
||||
limit is not specified for a container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set ", func() {
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
@ -253,7 +253,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
allocatable value for memory through DownwardAPIVolumeFiles if memory
|
||||
limit is not specified for a container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set ", func() {
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
|
35
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go
generated
vendored
@ -24,7 +24,6 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
@ -41,7 +40,7 @@ var (
|
||||
var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
f := framework.NewDefaultFramework("emptydir")
|
||||
|
||||
Context("when FSGroup is specified [Feature:FSGroup]", func() {
|
||||
Context("when FSGroup is specified [NodeFeature:FSGroup]", func() {
|
||||
It("new files should be created with FSGroup ownership when container is root", func() {
|
||||
doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
@ -73,7 +72,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
|
||||
mount type.
|
||||
*/
|
||||
framework.ConformanceIt("volume on tmpfs should have the correct mode", func() {
|
||||
framework.ConformanceIt("volume on tmpfs should have the correct mode [NodeConformance]", func() {
|
||||
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -83,7 +82,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure a root owned file with 0644 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,tmpfs)", func() {
|
||||
framework.ConformanceIt("should support (root,0644,tmpfs) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -93,7 +92,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure a root owned file with 0666 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,tmpfs)", func() {
|
||||
framework.ConformanceIt("should support (root,0666,tmpfs) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -103,7 +102,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure a root owned file with 0777 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,tmpfs)", func() {
|
||||
framework.ConformanceIt("should support (root,0777,tmpfs) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -113,7 +112,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure a user owned file with 0644 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,tmpfs)", func() {
|
||||
framework.ConformanceIt("should support (non-root,0644,tmpfs) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -123,7 +122,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure a user owned file with 0666 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,tmpfs)", func() {
|
||||
framework.ConformanceIt("should support (non-root,0666,tmpfs) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -133,7 +132,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
of 'Memory', ensure a user owned file with 0777 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,tmpfs)", func() {
|
||||
framework.ConformanceIt("should support (non-root,0777,tmpfs) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
@ -142,7 +141,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure the
|
||||
volume has 0777 unix file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("volume on default medium should have the correct mode", func() {
|
||||
framework.ConformanceIt("volume on default medium should have the correct mode [NodeConformance]", func() {
|
||||
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
@ -152,7 +151,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
root owned file with 0644 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,default)", func() {
|
||||
framework.ConformanceIt("should support (root,0644,default) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
@ -162,7 +161,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
root owned file with 0666 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,default)", func() {
|
||||
framework.ConformanceIt("should support (root,0666,default) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
@ -172,7 +171,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
root owned file with 0777 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,default)", func() {
|
||||
framework.ConformanceIt("should support (root,0777,default) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
@ -182,7 +181,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
user owned file with 0644 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,default)", func() {
|
||||
framework.ConformanceIt("should support (non-root,0644,default) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
@ -192,7 +191,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
user owned file with 0666 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,default)", func() {
|
||||
framework.ConformanceIt("should support (non-root,0666,default) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
@ -202,7 +201,7 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
user owned file with 0777 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,default)", func() {
|
||||
framework.ConformanceIt("should support (non-root,0777,default) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
})
|
||||
@ -252,6 +251,7 @@ func doTestSubPathFSGroup(f *framework.Framework, image string, medium v1.Storag
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
fmt.Sprintf("--file_owner=%v", volumePath),
|
||||
fmt.Sprintf("--file_mode=%v", volumePath),
|
||||
}
|
||||
|
||||
pod.Spec.Containers[0].VolumeMounts[0].SubPath = subPath
|
||||
@ -264,6 +264,7 @@ func doTestSubPathFSGroup(f *framework.Framework, image string, medium v1.Storag
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
"owner UID of \"/test-volume\": 0",
|
||||
"owner GID of \"/test-volume\": 123",
|
||||
"mode of file \"/test-volume\": dgtrwxrwxrwx",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
@ -428,7 +429,7 @@ func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
|
180
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go
generated
vendored
@ -21,6 +21,9 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// These tests exercise the Kubernetes expansion syntax $(VAR).
|
||||
@ -34,7 +37,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Description: Make sure environment variables can be set using an
|
||||
expansion of previously defined environment variables
|
||||
*/
|
||||
framework.ConformanceIt("should allow composing env vars into new env vars ", func() {
|
||||
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -79,7 +82,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Description: Make sure a container's commands can be set using an
|
||||
expansion of environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should allow substituting values in a container's command ", func() {
|
||||
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -114,7 +117,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Description: Make sure a container's args can be set using an
|
||||
expansion of environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should allow substituting values in a container's args ", func() {
|
||||
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -144,4 +147,175 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
"test-value",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-subpath
|
||||
Description: Make sure a container's subpath can be set using an
|
||||
expansion of environment variables.
|
||||
*/
|
||||
It("should allow substituting values in a volume subpath [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "test -d /testcontainer/" + podName + ";echo $?"},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
Value: podName,
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "workdir1",
|
||||
MountPath: "/logscontainer",
|
||||
SubPath: "$(POD_NAME)",
|
||||
},
|
||||
{
|
||||
Name: "workdir2",
|
||||
MountPath: "/testcontainer",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "workdir1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "workdir2",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("substitution in volume subpath", pod, 0, []string{
|
||||
"0",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-subpath-with-backticks
|
||||
Description: Make sure a container's subpath can not be set using an
|
||||
expansion of environment variables when backticks are supplied.
|
||||
*/
|
||||
It("should fail substituting values in a volume subpath with backticks [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() {
|
||||
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
Value: "..",
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "workdir1",
|
||||
MountPath: "/logscontainer",
|
||||
SubPath: "$(POD_NAME)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "workdir1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Pod should fail
|
||||
testPodFailSubpath(f, pod)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-subpath-with-absolute-path
|
||||
Description: Make sure a container's subpath can not be set using an
|
||||
expansion of environment variables when absolute path is supplied.
|
||||
*/
|
||||
It("should fail substituting values in a volume subpath with absolute path [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]", func() {
|
||||
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
Value: "/tmp",
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "workdir1",
|
||||
MountPath: "/logscontainer",
|
||||
SubPath: "$(POD_NAME)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "workdir1",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Pod should fail
|
||||
testPodFailSubpath(f, pod)
|
||||
})
|
||||
})
|
||||
|
||||
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
|
||||
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).ToNot(HaveOccurred(), "while creating pod")
|
||||
|
||||
defer func() {
|
||||
framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
}()
|
||||
|
||||
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
|
||||
Expect(err).To(HaveOccurred(), "while waiting for pod to be running")
|
||||
}
|
||||
|
9
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go
generated
vendored
@ -23,7 +23,6 @@ import (

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@ -46,7 +45,7 @@ var _ = Describe("[sig-storage] HostPath", func() {
volume is a directory with 0777 unix file permissions and that is has
the sticky bit (mode flag t) set.
*/
framework.ConformanceIt("should give a volume the correct mode", func() {
framework.ConformanceIt("should give a volume the correct mode [NodeConformance]", func() {
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
@ -62,7 +61,7 @@ var _ = Describe("[sig-storage] HostPath", func() {
})

// This test requires mounting a folder into a container with write privileges.
It("should support r/w", func() {
It("should support r/w [NodeConformance]", func() {
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
source := &v1.HostPathVolumeSource{
@ -86,7 +85,7 @@ var _ = Describe("[sig-storage] HostPath", func() {
})
})

It("should support subPath", func() {
It("should support subPath [NodeConformance]", func() {
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
@ -228,7 +227,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
|
15
vendor/k8s.io/kubernetes/test/e2e/common/init_container.go
generated
vendored
@ -29,12 +29,13 @@ import (
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/client/conditions"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
f := framework.NewDefaultFramework("init-container")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
@ -42,8 +43,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
})
|
||||
|
||||
It("should invoke init containers on a RestartNever pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -101,8 +100,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
})
|
||||
|
||||
It("should invoke init containers on a RestartAlways pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -130,7 +127,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
@ -164,8 +161,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
})
|
||||
|
||||
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -194,7 +189,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
@ -274,8 +269,6 @@ var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
})
|
||||
|
||||
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
|
73
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go
generated
vendored
@ -32,6 +32,8 @@ const (
|
||||
etcHostsPodName = "test-pod"
|
||||
etcHostsHostNetworkPodName = "test-host-network-pod"
|
||||
etcHostsPartialContent = "# Kubernetes-managed hosts file."
|
||||
etcHostsPath = "/etc/hosts"
|
||||
etcHostsOriginalPath = "/etc/hosts-original"
|
||||
)
|
||||
|
||||
var etcHostsImageName = imageutils.GetE2EImage(imageutils.Netexec)
|
||||
@ -53,7 +55,7 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
|
||||
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
|
||||
it into the container.
|
||||
*/
|
||||
framework.ConformanceIt("should test kubelet managed /etc/hosts file ", func() {
|
||||
framework.ConformanceIt("should test kubelet managed /etc/hosts file [NodeConformance]", func() {
|
||||
By("Setting up the test")
|
||||
config.setup()
|
||||
|
||||
@ -106,16 +108,24 @@ func assertManagedStatus(
|
||||
etcHostsContent := ""
|
||||
|
||||
for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
|
||||
etcHostsContent = config.getEtcHostsContent(podName, name)
|
||||
isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
|
||||
etcHostsContent = config.getFileContents(podName, name, etcHostsPath)
|
||||
etcHostsOriginalContent := config.getFileContents(podName, name, etcHostsOriginalPath)
|
||||
|
||||
if expectedIsManaged == isManaged {
|
||||
return
|
||||
// Make sure there is some content in both files
|
||||
if len(etcHostsContent) > 0 && len(etcHostsOriginalContent) > 0 {
// if the files match, kubernetes did not touch the file at all
// if the file has the header, kubernetes is not using host network
// and is constructing the file based on Pod IP
isManaged := strings.HasPrefix(etcHostsContent, etcHostsPartialContent) &&
etcHostsContent != etcHostsOriginalContent
if expectedIsManaged == isManaged {
return
}
}

glog.Warningf(
"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)
"For pod: %s, name: %s, expected %t, (/etc/hosts was %q), (/etc/hosts-original was %q), retryCount: %d",
podName, name, expectedIsManaged, etcHostsContent, etcHostsOriginalContent, retryCount)

retryCount++
time.Sleep(100 * time.Millisecond)
@ -132,8 +142,8 @@ func assertManagedStatus(
}
}

func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string {
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
func (config *KubeletManagedHostConfig) getFileContents(podName, containerName, path string) string {
return config.f.ExecCommandInContainer(podName, containerName, "cat", path)
}

func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
@ -153,6 +163,12 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: etcHostsOriginalPath,
},
},
},
{
Name: "busybox-2",
@ -162,6 +178,12 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: etcHostsOriginalPath,
},
},
},
{
Name: "busybox-3",
@ -174,7 +196,11 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: "/etc/hosts",
MountPath: etcHostsPath,
},
{
Name: "host-etc-hosts",
MountPath: etcHostsOriginalPath,
},
},
},
@ -184,7 +210,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
Name: "host-etc-hosts",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts",
Path: etcHostsPath,
Type: hostPathType,
},
},
@ -196,6 +222,8 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
}

func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
@ -212,6 +240,12 @@ func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName str
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: etcHostsOriginalPath,
},
},
},
{
Name: "busybox-2",
@ -221,6 +255,23 @@ func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName str
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: etcHostsOriginalPath,
},
},
},
},
Volumes: []v1.Volume{
{
Name: "host-etc-hosts",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: etcHostsPath,
Type: hostPathType,
},
},
},
},
},
8
vendor/k8s.io/kubernetes/test/e2e/common/networking.go
generated
vendored
@ -35,7 +35,7 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
Description: Try to hit test endpoints from a test container and make
|
||||
sure each of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for intra-pod communication: http ", func() {
|
||||
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
@ -47,7 +47,7 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
Description: Try to hit test endpoints from a test container using udp
|
||||
and make sure each of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for intra-pod communication: udp ", func() {
|
||||
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
@ -59,7 +59,7 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
Description: Try to hit test endpoints from the pod and make sure each
|
||||
of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for node-pod communication: http ", func() {
|
||||
framework.ConformanceIt("should function for node-pod communication: http [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
@ -71,7 +71,7 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
Description: Try to hit test endpoints from the pod using udp and make sure
|
||||
each of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for node-pod communication: udp ", func() {
|
||||
framework.ConformanceIt("should function for node-pod communication: udp [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
28
vendor/k8s.io/kubernetes/test/e2e/common/pods.go
generated
vendored
@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Description: Make sure when a pod is created that it is assigned a host IP
|
||||
Address.
|
||||
*/
|
||||
framework.ConformanceIt("should get a host IP ", func() {
|
||||
framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
|
||||
name := "pod-hostip-" + string(uuid.NewUUID())
|
||||
testHostIP(podClient, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -143,7 +143,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Description: Makes sure a pod is created, a watch can be setup for the pod,
|
||||
pod creation was observed, pod is deleted, and pod deletion is observed.
|
||||
*/
|
||||
framework.ConformanceIt("should be submitted and removed ", func() {
|
||||
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-submit-remove-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -280,7 +280,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Testname: pods-updated-successfully
|
||||
Description: Make sure it is possible to successfully update a pod's labels.
|
||||
*/
|
||||
framework.ConformanceIt("should be updated ", func() {
|
||||
framework.ConformanceIt("should be updated [NodeConformance]", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-update-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
activeDeadlineSecondsValue, and then waits for the deadline to pass
|
||||
and verifies the pod is terminated.
|
||||
*/
|
||||
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated ", func() {
|
||||
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -381,7 +381,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Description: Make sure that when a pod is created it contains environment
|
||||
variables for each active service.
|
||||
*/
|
||||
framework.ConformanceIt("should contain environment variables for services ", func() {
|
||||
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() {
|
||||
// Make a pod that will be a service.
|
||||
// This pod serves its hostname via HTTP.
|
||||
serverName := "server-envvars-" + string(uuid.NewUUID())
|
||||
@ -467,7 +467,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
}, maxRetries, "Container should have service environment variables set")
|
||||
})
|
||||
|
||||
It("should support remote command execution over websockets", func() {
|
||||
It("should support remote command execution over websockets [NodeConformance]", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
|
||||
|
||||
@ -523,7 +523,13 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
continue
|
||||
}
|
||||
if msg[0] != 1 {
|
||||
framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
|
||||
if len(msg) == 1 {
|
||||
// skip an empty message on stream other than stdout
|
||||
continue
|
||||
} else {
|
||||
framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
|
||||
}
|
||||
|
||||
}
|
||||
buf.Write(msg[1:])
|
||||
}
|
||||
@ -537,7 +543,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
}, time.Minute, 10*time.Second).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should support retrieving logs from the container over websockets", func() {
|
||||
It("should support retrieving logs from the container over websockets [NodeConformance]", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
|
||||
|
||||
@ -594,7 +600,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
|
||||
It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
|
||||
podName := "pod-back-off-image"
|
||||
containerName := "back-off"
|
||||
pod := &v1.Pod{
|
||||
@ -635,7 +641,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
})
|
||||
|
||||
// Slow issue #19027 (20 mins)
|
||||
It("should cap back-off at MaxContainerBackOff [Slow]", func() {
|
||||
It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
|
||||
podName := "back-off-cap"
|
||||
containerName := "back-off-cap"
|
||||
pod := &v1.Pod{
|
||||
2
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go
generated
vendored
@ -36,7 +36,7 @@ type PrivilegedPodTestConfig struct {
pod *v1.Pod
}

var _ = framework.KubeDescribe("PrivilegedPod", func() {
var _ = framework.KubeDescribe("PrivilegedPod [NodeConformance]", func() {
config := &PrivilegedPodTestConfig{
f: framework.NewDefaultFramework("e2e-privileged-pod"),
privilegedPod: "privileged-pod",
68
vendor/k8s.io/kubernetes/test/e2e/common/projected.go
generated
vendored
@ -39,7 +39,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Testname: projected-secret-no-defaultMode
|
||||
Description: Simple projected Secret test with no defaultMode set.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
@ -47,7 +47,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Testname: projected-secret-with-defaultMode
|
||||
Description: Simple projected Secret test with defaultMode set.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
@ -57,7 +57,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Simple projected Secret test as non-root with
|
||||
defaultMode and fsGroup set.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
fsGroup := int64(1001)
|
||||
uid := int64(1000)
|
||||
@ -70,7 +70,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
mounting it to a volume with a custom path (mapping) on the pod with
|
||||
no other settings and make sure the pod actually consumes it.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doProjectedSecretE2EWithMapping(f, nil)
|
||||
})
|
||||
|
||||
@ -79,12 +79,12 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Repeat the projected-secret-simple-mapped but this time
|
||||
with an item mode (e.g. 0400) for the secret map item.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doProjectedSecretE2EWithMapping(f, &mode)
|
||||
})
|
||||
|
||||
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
|
||||
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
|
||||
var (
|
||||
namespace2 *v1.Namespace
|
||||
err error
|
||||
@ -110,7 +110,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Make sure secrets works when mounted as two different
|
||||
volumes on the same node.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in a pod", func() {
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
|
||||
// This test ensures that the same secret can be mounted in multiple
|
||||
// volumes in the same pod. This test case exists to prevent
|
||||
// regressions that break this use-case.
|
||||
@ -203,7 +203,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Testname: projected-secret-simple-optional
|
||||
Description: Make sure secrets works when optional updates included.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume", func() {
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
@ -405,7 +405,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Make sure that a projected volume with a configMap with
|
||||
no mappings succeeds properly.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
@ -414,12 +414,12 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Make sure that a projected volume configMap is consumable
|
||||
with defaultMode set.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Feature:FSGroup]", func() {
|
||||
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
|
||||
})
|
||||
@ -429,11 +429,11 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Make sure that a projected volume configMap is consumable
|
||||
by a non-root userID.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup]", func() {
|
||||
It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
@ -443,7 +443,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
map and mounting it to a volume with a custom path (mapping) on the
|
||||
pod with no other settings and make sure the pod actually consumes it.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
@ -452,7 +452,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Repeat the projected-secret-simple-mapped but this time
|
||||
with an item mode (e.g. 0400) for the secret map item
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doProjectedConfigMapE2EWithMappings(f, 0, 0, &mode)
|
||||
})
|
||||
@ -462,11 +462,11 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Repeat the projected-config-map-simple-mapped but this
|
||||
time with a user other than root.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup]", func() {
|
||||
It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
@ -476,7 +476,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
that the values in these configMaps can be updated, deleted,
|
||||
and created.
|
||||
*/
|
||||
framework.ConformanceIt("updates should be reflected in volume", func() {
|
||||
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
@ -565,7 +565,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
configMaps, that the values in these configMaps can be updated,
|
||||
deleted, and created.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume", func() {
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
@ -766,7 +766,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Make sure config map works when it mounted as two
|
||||
different volumes on the same node.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod", func() {
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
|
||||
var (
|
||||
name = "projected-configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "projected-configmap-volume"
|
||||
@ -864,7 +864,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Ensure that downward API can provide pod's name through
|
||||
DownwardAPIVolumeFiles in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide podname only", func() {
|
||||
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
|
||||
|
||||
@ -879,7 +879,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
mode for DownwardAPIVolumeFiles if no mode is specified in a projected
|
||||
volume.
|
||||
*/
|
||||
framework.ConformanceIt("should set DefaultMode on files", func() {
|
||||
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
defaultMode := int32(0400)
|
||||
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
|
||||
@ -894,7 +894,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Ensure that downward API can set file permission mode for
|
||||
DownwardAPIVolumeFiles in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should set mode on item file", func() {
|
||||
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
mode := int32(0400)
|
||||
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
|
||||
@ -904,7 +904,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
})
|
||||
})
|
||||
|
||||
It("should provide podname as non-root with fsgroup [Feature:FSGroup]", func() {
|
||||
It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
uid := int64(1001)
|
||||
gid := int64(1234)
|
||||
@ -918,7 +918,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
})
|
||||
})
|
||||
|
||||
It("should provide podname as non-root with fsgroup and defaultMode [Feature:FSGroup]", func() {
|
||||
It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
uid := int64(1001)
|
||||
gid := int64(1234)
|
||||
@ -939,7 +939,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
DownwardAPIVolumeFiles when pod's labels get modified in a projected
|
||||
volume.
|
||||
*/
|
||||
framework.ConformanceIt("should update labels on modification", func() {
|
||||
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
|
||||
labels := map[string]string{}
|
||||
labels["key1"] = "value1"
|
||||
labels["key2"] = "value2"
|
||||
@ -972,7 +972,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
DownwardAPIVolumeFiles when pod's annotations get modified in a
|
||||
projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should update annotations on modification", func() {
|
||||
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
|
||||
annotations := map[string]string{}
|
||||
annotations["builder"] = "bar"
|
||||
podName := "annotationupdate" + string(uuid.NewUUID())
|
||||
@ -1006,7 +1006,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Ensure that downward API can provide container's CPU
|
||||
limit through DownwardAPIVolumeFiles in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu limit", func() {
|
||||
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
@ -1020,7 +1020,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
limit through DownwardAPIVolumeFiles in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory limit", func() {
|
||||
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
@ -1034,7 +1034,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Ensure that downward API can provide container's CPU
|
||||
request through DownwardAPIVolumeFiles in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu request", func() {
|
||||
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
|
||||
|
||||
@ -1048,7 +1048,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
request through DownwardAPIVolumeFiles in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory request", func() {
|
||||
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
|
||||
|
||||
@ -1063,7 +1063,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
allocatable value for CPU through DownwardAPIVolumeFiles if CPU limit
|
||||
is not specified for a container in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set", func() {
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
@ -1076,7 +1076,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
allocatable value for memory through DownwardAPIVolumeFiles if memory
|
||||
limit is not specified for a container in a projected volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set", func() {
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
@ -1089,7 +1089,7 @@ var _ = Describe("[sig-storage] Projected", func() {
|
||||
Description: This test projects a secret and configmap into the same
|
||||
directory to ensure projection is working as intended.
|
||||
*/
|
||||
framework.ConformanceIt("should project all components that make up the projection API [Projection]", func() {
|
||||
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
|
||||
var err error
|
||||
podName := "projected-volume-" + string(uuid.NewUUID())
|
||||
secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID())
|
||||
4
vendor/k8s.io/kubernetes/test/e2e/common/secrets.go
generated
vendored
@ -35,7 +35,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
|
||||
Description: Ensure that secret can be consumed via environment
|
||||
variables.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in env vars ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() {
|
||||
name := "secret-test-" + string(uuid.NewUUID())
|
||||
secret := secretForTest(f.Namespace.Name, name)
|
||||
|
||||
@ -84,7 +84,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
|
||||
Description: Ensure that secret can be consumed via source of a set
|
||||
of ConfigMaps.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable via the environment ", func() {
|
||||
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
|
||||
name := "secret-test-" + string(uuid.NewUUID())
|
||||
secret := newEnvFromSecret(f.Namespace.Name, name)
|
||||
By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
|
||||
16
vendor/k8s.io/kubernetes/test/e2e/common/secrets_volume.go
generated
vendored
@ -38,7 +38,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Description: Ensure that secret can be mounted without mapping to a
|
||||
pod volume.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
@ -47,7 +47,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Description: Ensure that secret can be mounted without mapping to a
|
||||
pod volume in default mode.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
@ -57,7 +57,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Description: Ensure that secret can be mounted without mapping to a pod
|
||||
volume as non-root in default mode with fsGroup set.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
fsGroup := int64(1001)
|
||||
uid := int64(1000)
|
||||
@ -69,7 +69,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Description: Ensure that secret can be mounted with mapping to a pod
|
||||
volume.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doSecretE2EWithMapping(f, nil)
|
||||
})
|
||||
|
||||
@ -78,12 +78,12 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Description: Ensure that secret can be mounted with mapping to a pod
|
||||
volume in item mode.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set ", func() {
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doSecretE2EWithMapping(f, &mode)
|
||||
})
|
||||
|
||||
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
|
||||
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
|
||||
var (
|
||||
namespace2 *v1.Namespace
|
||||
err error
|
||||
@ -108,7 +108,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Testname: secret-multiple-volume-mounts
|
||||
Description: Ensure that secret can be mounted to multiple pod volumes.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in a pod ", func() {
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
|
||||
// This test ensures that the same secret can be mounted in multiple
|
||||
// volumes in the same pod. This test case exists to prevent
|
||||
// regressions that break this use-case.
|
||||
@ -186,7 +186,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
|
||||
Description: Ensure that optional update change to secret can be
|
||||
reflected on a mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume ", func() {
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
88
vendor/k8s.io/kubernetes/test/e2e/common/sysctl.go
generated
vendored
@ -20,7 +20,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"

@ -28,7 +27,7 @@ import (
. "github.com/onsi/gomega"
)

var _ = framework.KubeDescribe("Sysctls", func() {
var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
f := framework.NewDefaultFramework("sysctl")
var podClient *framework.PodClient

@ -59,12 +58,14 @@ var _ = framework.KubeDescribe("Sysctls", func() {

It("should support sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
},
})
}
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}

By("Creating a pod with the kernel.shm_rmid_forced sysctl")
@ -100,12 +101,14 @@ var _ = framework.KubeDescribe("Sysctls", func() {

It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod()
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
},
})
}
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}

By("Creating a pod with the kernel.shm_rmid_forced sysctl")
@ -141,34 +144,27 @@ var _ = framework.KubeDescribe("Sysctls", func() {

It("should reject invalid sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "foo-",
Value: "bar",
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
// Safe parameters
{
Name: "foo-",
Value: "bar",
},
{
Name: "kernel.shmmax",
Value: "100000000",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
{
Name: "bar..",
Value: "42",
},
},
{
Name: "kernel.shmmax",
Value: "100000000",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shmall",
Value: "100000000",
},
{
Name: "bar..",
Value: "42",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
}

By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
@ -177,18 +173,20 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(ContainSubstring(`Invalid value: "foo-"`))
Expect(err.Error()).To(ContainSubstring(`Invalid value: "bar.."`))
Expect(err.Error()).To(ContainSubstring(`safe-and-unsafe`))
Expect(err.Error()).NotTo(ContainSubstring(`safe-and-unsafe`))
Expect(err.Error()).NotTo(ContainSubstring("kernel.shmmax"))
})

It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.msgmax",
Value: "10000000000",
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
{
Name: "kernel.msgmax",
Value: "10000000000",
},
},
})
}

By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
pod = podClient.Create(pod)
53
vendor/k8s.io/kubernetes/test/e2e/common/util.go
generated
vendored
@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -98,39 +99,45 @@ func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}

func RestartNodes(c clientset.Interface, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
func RestartNodes(c clientset.Interface, nodes []v1.Node) error {
// Build mapping from zone to nodes in that zone.
nodeNamesByZone := make(map[string][]string)
for i := range nodes {
node := &nodes[i]
zone := framework.TestContext.CloudConfig.Zone
if z, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]; ok {
zone = z
}
oldBootIDs[name] = node.Status.NodeInfo.BootID
nodeNamesByZone[zone] = append(nodeNamesByZone[zone], node.Name)
}

// Reboot the nodes.
args := []string{
"compute",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"instances",
"reset",
}
args = append(args, nodeNames...)
args = append(args, fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone))
stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil {
return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
for zone, nodeNames := range nodeNamesByZone {
args := []string{
"compute",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"instances",
"reset",
}
args = append(args, nodeNames...)
args = append(args, fmt.Sprintf("--zone=%s", zone))
stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil {
return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
}

// Wait for their boot IDs to change.
for _, name := range nodeNames {
for i := range nodes {
node := &nodes[i]
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}
return node.Status.NodeInfo.BootID != oldBootIDs[name], nil
return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil
}); err != nil {
return fmt.Errorf("error waiting for node %s boot ID to change: %s", name, err)
return fmt.Errorf("error waiting for node %s boot ID to change: %s", node.Name, err)
}
}
return nil
8
vendor/k8s.io/kubernetes/test/e2e/e2e.go
generated
vendored
@ -43,6 +43,9 @@ import (
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/manifest"
testutils "k8s.io/kubernetes/test/utils"

// ensure auth plugins are loaded
_ "k8s.io/client-go/plugin/pkg/client/auth"
)

var (
@ -72,12 +75,9 @@ func setupProviderConfig() error {
managedZones = []string{zone}
}

gceAlphaFeatureGate, err := gcecloud.NewAlphaFeatureGate([]string{
gceAlphaFeatureGate := gcecloud.NewAlphaFeatureGate([]string{
gcecloud.AlphaFeatureNetworkEndpointGroup,
})
if err != nil {
glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
}

gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint,
4
vendor/k8s.io/kubernetes/test/e2e/examples.go
generated
vendored
@ -521,7 +521,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("scaling rethinkdb")
|
||||
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "rethinkdb-rc", 2, true)
|
||||
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "rethinkdb-rc", 2, true)
|
||||
checkDbInstances()
|
||||
|
||||
By("starting admin")
|
||||
@ -564,7 +564,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("scaling hazelcast")
|
||||
framework.ScaleRC(f.ClientSet, f.InternalClientset, f.ScalesGetter, ns, "hazelcast", 2, true)
|
||||
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "hazelcast", 2, true)
|
||||
forEachPod("name", "hazelcast", func(pod v1.Pod) {
|
||||
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
10
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD
generated
vendored
@ -45,7 +45,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
@ -61,7 +60,6 @@ go_library(
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
@ -73,7 +71,8 @@ go_library(
"//pkg/kubemark:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/schedulercache:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/ssh:go_default_library",
"//pkg/util/file:go_default_library",
@ -116,10 +115,8 @@ go_library(
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -144,11 +141,12 @@ go_library(
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
16
vendor/k8s.io/kubernetes/test/e2e/framework/crd_util.go
generated
vendored
@ -23,6 +23,7 @@ import (
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
)

@ -66,18 +67,23 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
Failf("failed to initialize apiExtensionClient: %v", err)
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
Failf("failed to initialize dynamic client: %v", err)
return nil, err
}

crd := newCRDForTest(testcrd)

//create CRD and waits for the resource to be recognized and available.
dynamicClient, err := testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient, f.ClientPool)
crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
}
resourceClient := dynamicClient.Resource(&metav1.APIResource{
Name: crd.Spec.Names.Plural,
Namespaced: true,
}, f.Namespace.Name)

gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}
resourceClient := dynamicClient.Resource(gvr).Namespace(f.Namespace.Name)

testcrd.ApiExtensionClient = apiExtensionClient
testcrd.Crd = crd
77
vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go
generated
vendored
@ -22,37 +22,36 @@ import (

. "github.com/onsi/ginkgo"

apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
)

func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
}

// Waits for the deployment to clean up old rcs.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
var oldRSs []*extensions.ReplicaSet
var d *extensions.Deployment
var oldRSs []*apps.ReplicaSet
var d *apps.Deployment

pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
d = deployment

_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
if err != nil {
return false, err
}
@ -65,7 +64,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
return pollErr
}

func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
func logReplicaSetsOfDeployment(deployment *apps.Deployment, allOldRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) {
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
}

@ -73,7 +72,7 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
}

func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType apps.DeploymentConditionType) error {
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
}

@ -84,16 +83,17 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
}

func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType apps.DeploymentStrategyType) *apps.Deployment {
zero := int64(0)
return &extensions.Deployment{
return &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Name: deploymentName,
Labels: podLabels,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Strategy: apps.DeploymentStrategy{
Type: strategyType,
},
Template: v1.PodTemplateSpec{
@ -117,13 +117,13 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
// Waits for the deployment to complete, and don't check if rolling update strategy is broken.
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or the deployment is just created.
func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error {
func WaitForDeploymentComplete(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
}

// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error {
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment) error {
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
}

@ -140,12 +140,12 @@ func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName

// WatchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time with
// old pods.
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
if d.Spec.Strategy.Type != apps.RecreateDeploymentStrategyType {
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}

w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
@ -153,12 +153,12 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
status := d.Status

condition := func(event watch.Event) (bool, error) {
d := event.Object.(*extensions.Deployment)
d := event.Object.(*apps.Deployment)
status = d.Status

if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
if err == nil && nerr == nil {
Logf("%+v", d)
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
@ -179,8 +179,8 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
return err
}

func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, extensionsinternal.Kind("Deployment"), extensionsinternal.Resource("deployments"))
func ScaleDeployment(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, appsinternal.Kind("Deployment"), appsinternal.Resource("deployments"))
}

func RunDeployment(config testutils.DeploymentConfig) error {
@ -190,13 +190,13 @@ func RunDeployment(config testutils.DeploymentConfig) error {
return testutils.RunDeployment(config)
}

func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
func logPodsOfDeployment(c clientset.Interface, deployment *apps.Deployment, rsList []*apps.ReplicaSet) {
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
}

func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error {
func WaitForDeploymentRevision(c clientset.Interface, d *apps.Deployment, targetRevision string) error {
err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -214,9 +214,9 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
}

func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*apps.Deployment, error) {
deploymentSpec := MakeDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(deploymentSpec)
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
@ -230,19 +230,22 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[

// MakeDeployment creates a deployment definition based on the namespace. The deployment references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *apps.Deployment {
if len(command) == 0 {
command = "while true; do sleep 1; done"
command = "trap exit TERM; while true; do sleep 1; done"
}
zero := int64(0)
deploymentName := "deployment-" + string(uuid.NewUUID())
deploymentSpec := &extensions.Deployment{
deploymentSpec := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: namespace,
},
Spec: extensions.DeploymentSpec{
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
@ -281,8 +284,8 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
|
||||
}
|
||||
|
||||
// GetPodsForDeployment gets pods for the given deployment
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Deployment) (*v1.PodList, error) {
|
||||
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.ExtensionsV1beta1())
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
|
||||
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
|
||||
}
|
||||
@ -292,7 +295,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Dep
|
||||
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
|
||||
return client.CoreV1().Pods(namespace).List(options)
|
||||
}
|
||||
rsList := []*extensions.ReplicaSet{replicaSet}
|
||||
rsList := []*apps.ReplicaSet{replicaSet}
|
||||
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
|
||||
|
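A short sketch of a caller for the updated GetPodsForDeployment, which resolves the deployment's new ReplicaSet through the AppsV1 client and then lists only the pods that belong to it; the helper name below is hypothetical:

package deploymentexample

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// logDeploymentPods prints the pods owned by the deployment's current ReplicaSet,
// which is what GetPodsForDeployment returns via deploymentutil.ListPods.
func logDeploymentPods(c clientset.Interface, d *apps.Deployment) error {
	pods, err := framework.GetPodsForDeployment(c, d)
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		fmt.Printf("pod %s/%s phase=%s\n", p.Namespace, p.Name, p.Status.Phase)
	}
	return nil
}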
28
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
@ -28,7 +28,6 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
@ -40,6 +39,7 @@ import (
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/restmapper"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||
@ -70,7 +70,7 @@ type Framework struct {
|
||||
|
||||
InternalClientset *internalclientset.Clientset
|
||||
AggregatorClient *aggregatorclient.Clientset
|
||||
ClientPool dynamic.ClientPool
|
||||
DynamicClient dynamic.Interface
|
||||
|
||||
ScalesGetter scaleclient.ScalesGetter
|
||||
|
||||
@ -167,7 +167,8 @@ func (f *Framework) BeforeEach() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
f.DynamicClient, err = dynamic.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
|
||||
// as they are required when creating a REST client.
|
||||
@ -182,7 +183,7 @@ func (f *Framework) BeforeEach() {
|
||||
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
|
||||
restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscoClient, meta.InterfacesForUnstructured)
|
||||
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
|
||||
restMapper.Reset()
|
||||
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
|
||||
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
|
||||
@ -288,7 +289,7 @@ func (f *Framework) AfterEach() {
|
||||
if f.NamespaceDeletionTimeout != 0 {
|
||||
timeout = f.NamespaceDeletionTimeout
|
||||
}
|
||||
if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil {
|
||||
if err := deleteNS(f.ClientSet, f.DynamicClient, ns.Name, timeout); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
nsDeletionErrors[ns.Name] = err
|
||||
} else {
|
||||
@ -399,10 +400,7 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
|
||||
ns, err := createTestingNS(baseName, f.ClientSet, labels)
|
||||
// check ns instead of err to see if it's nil as we may
|
||||
// fail to create serviceAccount in it.
|
||||
// In this case, we should not forget to delete the namespace.
|
||||
if ns != nil {
|
||||
f.namespacesToDelete = append(f.namespacesToDelete, ns)
|
||||
}
|
||||
f.AddNamespacesToDelete(ns)
|
||||
|
||||
if err == nil && !f.SkipPrivilegedPSPBinding {
|
||||
CreatePrivilegedPSPBinding(f, ns.Name)
|
||||
@ -411,6 +409,18 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
|
||||
return ns, err
|
||||
}
|
||||
|
||||
// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
// completes.
func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
	for _, ns := range namespaces {
		if ns == nil {
			continue
		}
		f.namespacesToDelete = append(f.namespacesToDelete, ns)

	}
}
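A minimal sketch of how a test might use the new helper to register an out-of-band namespace for cleanup in AfterEach; the function and namespace name here are illustrative, not part of this change:

package frameworkexample

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// registerExtraNamespace creates a namespace directly through the clientset
// (outside f.CreateNamespace) and still gets it torn down by registering it.
func registerExtraNamespace(f *framework.Framework, name string) (*v1.Namespace, error) {
	ns, err := f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: name},
	})
	if err != nil {
		return nil, err
	}
	f.AddNamespacesToDelete(ns)
	return ns, nil
}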
|
||||
|
||||
// WaitForPodTerminated waits for the pod to be terminated with the given reason.
|
||||
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
|
||||
return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e/framework/get-kubemark-resource-usage.go
generated
vendored
@ -39,7 +39,7 @@ func getMasterUsageByPrefix(prefix string) (string, error) {
|
||||
// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
|
||||
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
|
||||
result := make(map[string]*KubemarkResourceUsage)
|
||||
// Get kuberenetes component resource usage
|
||||
// Get kubernetes component resource usage
|
||||
sshResult, err := getMasterUsageByPrefix("kube")
|
||||
if err != nil {
|
||||
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
|
||||
|
56
vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go
generated
vendored
@ -35,8 +35,12 @@ func lookupClusterImageSources() (string, string, error) {
|
||||
gcloudf := func(argv ...string) ([]string, error) {
|
||||
args := []string{"compute"}
|
||||
args = append(args, argv...)
|
||||
args = append(args, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--zone", TestContext.CloudConfig.Zone)
|
||||
args = append(args, "--project", TestContext.CloudConfig.ProjectID)
|
||||
if TestContext.CloudConfig.MultiMaster {
|
||||
args = append(args, "--region", TestContext.CloudConfig.Region)
|
||||
} else {
|
||||
args = append(args, "--zone", TestContext.CloudConfig.Zone)
|
||||
}
|
||||
outputBytes, err := exec.Command("gcloud", args...).CombinedOutput()
|
||||
str := strings.Replace(string(outputBytes), ",", "\n", -1)
|
||||
str = strings.Replace(str, ";", "\n", -1)
|
||||
@ -141,6 +145,28 @@ func CreateManagedInstanceGroup(size int64, zone, template string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetManagedInstanceGroupTemplateName(zone string) (string, error) {
|
||||
// TODO(verult): make this hit the compute API directly instead of
|
||||
// shelling out to gcloud. Use InstanceGroupManager to get Instance Template name.
|
||||
|
||||
stdout, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
|
||||
"list",
|
||||
fmt.Sprintf("--filter=name:%s", TestContext.CloudConfig.NodeInstanceGroup),
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zones=%s", zone),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("gcloud compute instance-groups managed list call failed with err: %v", err)
|
||||
}
|
||||
|
||||
templateName, err := parseInstanceTemplateName(stdout)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error parsing gcloud output: %v", err)
|
||||
}
|
||||
return templateName, nil
|
||||
}
|
||||
|
||||
func DeleteManagedInstanceGroup(zone string) error {
|
||||
// TODO(verult): make this hit the compute API directly instead of
|
||||
// shelling out to gcloud.
|
||||
@ -154,3 +180,29 @@ func DeleteManagedInstanceGroup(zone string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseInstanceTemplateName(gcloudOutput string) (string, error) {
	const templateNameField = "INSTANCE_TEMPLATE"

	lines := strings.Split(gcloudOutput, "\n")
	if len(lines) <= 1 { // Empty output or only contains column names
		return "", fmt.Errorf("the list is empty")
	}

	// Otherwise, there should be exactly 1 entry, i.e. 2 lines
	fieldNames := strings.Fields(lines[0])
	instanceTemplateColumn := 0
	for instanceTemplateColumn < len(fieldNames) &&
		fieldNames[instanceTemplateColumn] != templateNameField {
		instanceTemplateColumn++
	}

	if instanceTemplateColumn == len(fieldNames) {
		return "", fmt.Errorf("the list does not contain instance template information")
	}

	fields := strings.Fields(lines[1])
	instanceTemplateName := fields[instanceTemplateColumn]

	return instanceTemplateName, nil
}
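The parser above scans the header row of `gcloud compute instance-groups managed list` output for the INSTANCE_TEMPLATE column and reads that column from the single data row. A standalone sketch of the same column scan, using made-up output since the function is unexported:

package main

import (
	"fmt"
	"strings"
)

// Hypothetical sample of the tabular gcloud output the parser expects:
// one header row followed by exactly one matching instance group.
const sampleOutput = `NAME            LOCATION       SCOPE  BASE_INSTANCE_NAME  SIZE  TARGET_SIZE  INSTANCE_TEMPLATE
example-group   us-central1-b  zone   example-group       3     3            example-template`

func main() {
	lines := strings.Split(sampleOutput, "\n")
	header := strings.Fields(lines[0])
	col := -1
	for i, name := range header {
		if name == "INSTANCE_TEMPLATE" {
			col = i
			break
		}
	}
	if col < 0 {
		fmt.Println("no INSTANCE_TEMPLATE column")
		return
	}
	fmt.Println(strings.Fields(lines[1])[col]) // prints "example-template"
}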
|
||||
|
266
vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go
generated
vendored
@ -20,6 +20,7 @@ import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
@ -119,6 +120,16 @@ const (
|
||||
// GCE only allows names < 64 characters, and the loadbalancer controller inserts
|
||||
// a single character of padding.
|
||||
nameLenLimit = 62
|
||||
|
||||
NEGAnnotation = "alpha.cloud.google.com/load-balancer-neg"
|
||||
NEGUpdateTimeout = 2 * time.Minute
|
||||
|
||||
InstanceGroupAnnotation = "ingress.gcp.kubernetes.io/instance-groups"
|
||||
|
||||
// Prefix for annotation keys used by the ingress controller to specify the
|
||||
// names of GCP resources such as forwarding rules, url maps, target proxies, etc
|
||||
// that it created for the corresponding ingress.
|
||||
StatusPrefix = "ingress.kubernetes.io"
|
||||
)
|
||||
|
||||
type TestLogger interface {
|
||||
@ -165,7 +176,7 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
|
||||
updateURLMapHost := "bar.baz.com"
|
||||
updateURLMapPath := "/testurl"
|
||||
// Platform agnostic list of tests that must be satisfied by all controllers
|
||||
return []IngressConformanceTests{
|
||||
tests := []IngressConformanceTests{
|
||||
{
|
||||
fmt.Sprintf("should create a basic HTTP ingress"),
|
||||
func() { jig.CreateIngress(manifestPath, ns, annotations, annotations) },
|
||||
@ -173,30 +184,9 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
|
||||
},
|
||||
{
|
||||
fmt.Sprintf("should terminate TLS for host %v", tlsHost),
|
||||
func() { jig.AddHTTPS(tlsSecretName, tlsHost) },
|
||||
func() { jig.SetHTTPS(tlsSecretName, tlsHost) },
|
||||
fmt.Sprintf("waiting for HTTPS updates to reflect in ingress"),
|
||||
},
|
||||
{
|
||||
fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
|
||||
func() {
|
||||
jig.Update(func(ing *extensions.Ingress) {
|
||||
newRules := []extensions.IngressRule{}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.Host != tlsHost {
|
||||
newRules = append(newRules, rule)
|
||||
continue
|
||||
}
|
||||
newRules = append(newRules, extensions.IngressRule{
|
||||
Host: updatedTLSHost,
|
||||
IngressRuleValue: rule.IngressRuleValue,
|
||||
})
|
||||
}
|
||||
ing.Spec.Rules = newRules
|
||||
})
|
||||
jig.AddHTTPS(tlsSecretName, updatedTLSHost)
|
||||
},
|
||||
fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
|
||||
},
|
||||
{
|
||||
fmt.Sprintf("should update url map for host %v to expose a single url: %v", updateURLMapHost, updateURLMapPath),
|
||||
func() {
|
||||
@ -233,6 +223,31 @@ func CreateIngressComformanceTests(jig *IngressTestJig, ns string, annotations m
|
||||
fmt.Sprintf("Waiting for path updates to reflect in L7"),
|
||||
},
|
||||
}
|
||||
// Skip the Update TLS cert test for kubemci: https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/141.
|
||||
if jig.Class != MulticlusterIngressClassValue {
|
||||
tests = append(tests, IngressConformanceTests{
|
||||
fmt.Sprintf("should update SSL certificate with modified hostname %v", updatedTLSHost),
|
||||
func() {
|
||||
jig.Update(func(ing *extensions.Ingress) {
|
||||
newRules := []extensions.IngressRule{}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.Host != tlsHost {
|
||||
newRules = append(newRules, rule)
|
||||
continue
|
||||
}
|
||||
newRules = append(newRules, extensions.IngressRule{
|
||||
Host: updatedTLSHost,
|
||||
IngressRuleValue: rule.IngressRuleValue,
|
||||
})
|
||||
}
|
||||
ing.Spec.Rules = newRules
|
||||
})
|
||||
jig.SetHTTPS(tlsSecretName, updatedTLSHost)
|
||||
},
|
||||
fmt.Sprintf("Waiting for updated certificates to accept requests for host %v", updatedTLSHost),
|
||||
})
|
||||
}
|
||||
return tests
|
||||
}
|
||||
|
||||
// GenerateRSACerts generates a basic self signed certificate using a key length
|
||||
@ -850,14 +865,24 @@ func (cont *GCEIngressController) GetFirewallRuleName() string {
|
||||
}
|
||||
|
||||
// GetFirewallRule returns the firewall used by the GCEIngressController.
|
||||
// Causes a fatal error incase of an error.
|
||||
// TODO: Rename this to GetFirewallRuleOrDie and similarly rename all other
|
||||
// methods here to be consistent with rest of the code in this repo.
|
||||
func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall {
|
||||
gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
fwName := cont.GetFirewallRuleName()
|
||||
fw, err := gceCloud.GetFirewall(fwName)
|
||||
fw, err := cont.GetFirewallRuleOrError()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return fw
|
||||
}
|
||||
|
||||
// GetFirewallRule returns the firewall used by the GCEIngressController.
|
||||
// Returns an error if that fails.
|
||||
// TODO: Rename this to GetFirewallRule when the above method with that name is renamed.
|
||||
func (cont *GCEIngressController) GetFirewallRuleOrError() (*compute.Firewall, error) {
|
||||
gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
fwName := cont.GetFirewallRuleName()
|
||||
return gceCloud.GetFirewall(fwName)
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) {
|
||||
fwList := []compute.Firewall{}
|
||||
regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter)
|
||||
@ -883,41 +908,72 @@ func (cont *GCEIngressController) isHTTPErrorCode(err error, code int) bool {
|
||||
}
|
||||
|
||||
// BackendServiceUsingNEG returns true only if all global backend service with matching nodeports pointing to NEG as backend
|
||||
func (cont *GCEIngressController) BackendServiceUsingNEG(nodeports []string) (bool, error) {
|
||||
return cont.backendMode(nodeports, "networkEndpointGroups")
|
||||
func (cont *GCEIngressController) BackendServiceUsingNEG(svcPorts map[string]v1.ServicePort) (bool, error) {
|
||||
return cont.backendMode(svcPorts, "networkEndpointGroups")
|
||||
}
|
||||
|
||||
// BackendServiceUsingIG returns true only if all global backend service with matching nodeports pointing to IG as backend
|
||||
func (cont *GCEIngressController) BackendServiceUsingIG(nodeports []string) (bool, error) {
|
||||
return cont.backendMode(nodeports, "instanceGroups")
|
||||
// BackendServiceUsingIG returns true only if all global backend service with matching svcPorts pointing to IG as backend
|
||||
func (cont *GCEIngressController) BackendServiceUsingIG(svcPorts map[string]v1.ServicePort) (bool, error) {
|
||||
return cont.backendMode(svcPorts, "instanceGroups")
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) backendMode(nodeports []string, keyword string) (bool, error) {
|
||||
func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort, keyword string) (bool, error) {
|
||||
gceCloud := cont.Cloud.Provider.(*gcecloud.GCECloud)
|
||||
beList, err := gceCloud.ListGlobalBackendServices()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to list backend services: %v", err)
|
||||
}
|
||||
|
||||
hcList, err := gceCloud.ListHealthChecks()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to list health checks: %v", err)
|
||||
}
|
||||
|
||||
uid := cont.UID
|
||||
if len(uid) > 8 {
|
||||
uid = uid[:8]
|
||||
}
|
||||
|
||||
matchingBackendService := 0
|
||||
for _, bs := range beList {
|
||||
for svcName, sp := range svcPorts {
|
||||
match := false
|
||||
for _, np := range nodeports {
|
||||
// Warning: This assumes backend service naming convention includes nodeport in the name
|
||||
if strings.Contains(bs.Name, np) {
|
||||
bsMatch := &compute.BackendService{}
|
||||
// Non-NEG BackendServices are named with the Nodeport in the name.
|
||||
// NEG BackendServices' names contain a sha256 hash of a string.
|
||||
negString := strings.Join([]string{uid, cont.Ns, svcName, sp.TargetPort.String()}, ";")
|
||||
negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
|
||||
for _, bs := range beList {
|
||||
if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||
|
||||
strings.Contains(bs.Name, negHash) {
|
||||
match = true
|
||||
bsMatch = bs
|
||||
matchingBackendService += 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if match {
|
||||
for _, be := range bs.Backends {
|
||||
for _, be := range bsMatch.Backends {
|
||||
if !strings.Contains(be.Group, keyword) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the correct HealthCheck exists for the BackendService
|
||||
hcMatch := false
|
||||
for _, hc := range hcList {
|
||||
if hc.Name == bsMatch.Name {
|
||||
hcMatch = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !hcMatch {
|
||||
return false, fmt.Errorf("missing healthcheck for backendservice: %v", bsMatch.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return matchingBackendService == len(nodeports), nil
|
||||
return matchingBackendService == len(svcPorts), nil
|
||||
}
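The matching logic above relies on the GCE ingress controller's naming convention: non-NEG backend services embed the NodePort in the name, while NEG backend services embed the first eight hex characters of a SHA-256 over the string uid;namespace;service;targetPort, with the UID itself truncated to eight characters. A minimal sketch of computing that suffix; all values are placeholders:

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

func main() {
	// Made-up example values.
	uid := "a1b2c3d4"        // first 8 chars of the ingress controller UID
	namespace := "e2e-tests" // namespace of the backend service
	service := "echoheaders" // backend service name
	targetPort := "8080"     // service target port, as a string

	negString := strings.Join([]string{uid, namespace, service, targetPort}, ";")
	negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
	fmt.Println("expected NEG name fragment:", negHash)
}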
|
||||
|
||||
// Cleanup cleans up cloud resources.
|
||||
@ -1131,7 +1187,7 @@ func (j *IngressTestJig) CreateIngress(manifestPath, ns string, ingAnnotations m
|
||||
for k, v := range ingAnnotations {
|
||||
j.Ingress.Annotations[k] = v
|
||||
}
|
||||
j.Logger.Infof(fmt.Sprintf("creating" + j.Ingress.Name + " ingress"))
|
||||
j.Logger.Infof(fmt.Sprintf("creating " + j.Ingress.Name + " ingress"))
|
||||
j.Ingress, err = j.runCreate(j.Ingress)
|
||||
ExpectNoError(err)
|
||||
}
|
||||
@ -1146,7 +1202,7 @@ func (j *IngressTestJig) runCreate(ing *extensions.Ingress) (*extensions.Ingress
|
||||
if err := manifest.IngressToManifest(ing, filePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err := runKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
_, err := RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
return ing, err
|
||||
}
|
||||
|
||||
@ -1161,7 +1217,7 @@ func (j *IngressTestJig) runUpdate(ing *extensions.Ingress) (*extensions.Ingress
|
||||
if err := manifest.IngressToManifest(ing, filePath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err := runKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
|
||||
_, err := RunKubemciWithKubeconfig("create", ing.Name, fmt.Sprintf("--ingress=%s", filePath), "--force")
|
||||
return ing, err
|
||||
}
|
||||
|
||||
@ -1172,7 +1228,7 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) {
|
||||
for i := 0; i < 3; i++ {
|
||||
j.Ingress, err = j.Client.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
Failf("failed to get ingress %q: %v", name, err)
|
||||
Failf("failed to get ingress %s/%s: %v", ns, name, err)
|
||||
}
|
||||
update(j.Ingress)
|
||||
j.Ingress, err = j.runUpdate(j.Ingress)
|
||||
@ -1181,24 +1237,50 @@ func (j *IngressTestJig) Update(update func(ing *extensions.Ingress)) {
|
||||
return
|
||||
}
|
||||
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
|
||||
Failf("failed to update ingress %q: %v", name, err)
|
||||
Failf("failed to update ingress %s/%s: %v", ns, name, err)
|
||||
}
|
||||
}
|
||||
Failf("too many retries updating ingress %q", name)
|
||||
Failf("too many retries updating ingress %s/%s", ns, name)
|
||||
}
|
||||
|
||||
// AddHTTPS updates the ingress to use this secret for these hosts.
|
||||
// AddHTTPS updates the ingress to add this secret for these hosts.
|
||||
func (j *IngressTestJig) AddHTTPS(secretName string, hosts ...string) {
|
||||
j.Ingress.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
|
||||
// TODO: Just create the secret in GetRootCAs once we're watching secrets in
|
||||
// the ingress controller.
|
||||
_, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...)
|
||||
ExpectNoError(err)
|
||||
j.Logger.Infof("Updating ingress %v to use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Logger.Infof("Updating ingress %v to also use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.TLS = append(ing.Spec.TLS, extensions.IngressTLS{Hosts: hosts, SecretName: secretName})
|
||||
})
|
||||
j.RootCAs[secretName] = cert
|
||||
}
|
||||
|
||||
// SetHTTPS updates the ingress to use only this secret for these hosts.
|
||||
func (j *IngressTestJig) SetHTTPS(secretName string, hosts ...string) {
|
||||
_, cert, _, err := createTLSSecret(j.Client, j.Ingress.Namespace, secretName, hosts...)
|
||||
ExpectNoError(err)
|
||||
j.Logger.Infof("Updating ingress %v to only use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
|
||||
})
|
||||
j.RootCAs[secretName] = cert
|
||||
j.RootCAs = map[string][]byte{secretName: cert}
|
||||
}
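The split between AddHTTPS (append a TLS entry and cache its CA alongside the existing ones) and SetHTTPS (replace the whole TLS list and reset the CA cache) mirrors appending to versus overwriting the ingress spec. A hedged usage sketch, assuming a jig that already has an ingress; host and secret names are placeholders:

package ingressexample

import "k8s.io/kubernetes/test/e2e/framework"

// rotateCerts is hypothetical: it first serves one host from one secret, adds a
// second, then drops the first so only the second remains.
func rotateCerts(jig *framework.IngressTestJig) {
	jig.SetHTTPS("secret-a", "a.example.com") // TLS: only secret-a
	jig.AddHTTPS("secret-b", "b.example.com") // TLS: secret-a + secret-b
	jig.RemoveHTTPS("secret-a")               // TLS: only secret-b again
}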
|
||||
|
||||
// RemoveHTTPS updates the ingress to not use this secret for TLS.
|
||||
// Note: Does not delete the secret.
|
||||
func (j *IngressTestJig) RemoveHTTPS(secretName string) {
|
||||
newTLS := []extensions.IngressTLS{}
|
||||
for _, ingressTLS := range j.Ingress.Spec.TLS {
|
||||
if secretName != ingressTLS.SecretName {
|
||||
newTLS = append(newTLS, ingressTLS)
|
||||
}
|
||||
}
|
||||
j.Logger.Infof("Updating ingress %v to not use secret %v for TLS termination", j.Ingress.Name, secretName)
|
||||
j.Update(func(ing *extensions.Ingress) {
|
||||
ing.Spec.TLS = newTLS
|
||||
})
|
||||
delete(j.RootCAs, secretName)
|
||||
}
|
||||
|
||||
// PrepareTLSSecret creates a TLS secret and caches the cert.
|
||||
@ -1227,7 +1309,7 @@ func (j *IngressTestJig) TryDeleteIngress() {
|
||||
}
|
||||
|
||||
func (j *IngressTestJig) TryDeleteGivenIngress(ing *extensions.Ingress) {
|
||||
if err := j.runDelete(ing, j.Class); err != nil {
|
||||
if err := j.runDelete(ing); err != nil {
|
||||
j.Logger.Infof("Error while deleting the ingress %v/%v with class %s: %v", ing.Namespace, ing.Name, j.Class, err)
|
||||
}
|
||||
}
|
||||
@ -1240,7 +1322,7 @@ func (j *IngressTestJig) TryDeleteGivenService(svc *v1.Service) {
|
||||
}
|
||||
|
||||
// runDelete runs the required command to delete the given ingress.
|
||||
func (j *IngressTestJig) runDelete(ing *extensions.Ingress, class string) error {
|
||||
func (j *IngressTestJig) runDelete(ing *extensions.Ingress) error {
|
||||
if j.Class != MulticlusterIngressClassValue {
|
||||
return j.Client.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil)
|
||||
}
|
||||
@ -1249,7 +1331,7 @@ func (j *IngressTestJig) runDelete(ing *extensions.Ingress, class string) error
|
||||
if err := manifest.IngressToManifest(ing, filePath); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := runKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
_, err := RunKubemciWithKubeconfig("delete", ing.Name, fmt.Sprintf("--ingress=%s", filePath))
|
||||
return err
|
||||
}
|
||||
|
||||
@ -1257,7 +1339,7 @@ func (j *IngressTestJig) runDelete(ing *extensions.Ingress, class string) error
|
||||
// TODO(nikhiljindal): Update this to be able to return hostname as well.
|
||||
func getIngressAddressFromKubemci(name string) ([]string, error) {
|
||||
var addresses []string
|
||||
out, err := runKubemciCmd("get-status", name)
|
||||
out, err := RunKubemciCmd("get-status", name)
|
||||
if err != nil {
|
||||
return addresses, err
|
||||
}
|
||||
@ -1304,13 +1386,14 @@ func (j *IngressTestJig) WaitForIngressAddress(c clientset.Interface, ns, ingNam
|
||||
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
|
||||
ipOrNameList, err := getIngressAddress(c, ns, ingName, j.Class)
|
||||
if err != nil || len(ipOrNameList) == 0 {
|
||||
j.Logger.Errorf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
|
||||
if IsRetryableAPIError(err) {
|
||||
j.Logger.Errorf("Waiting for Ingress %s/%s to acquire IP, error: %v, ipOrNameList: %v", ns, ingName, err, ipOrNameList)
|
||||
if testutils.IsRetryableAPIError(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
address = ipOrNameList[0]
|
||||
j.Logger.Infof("Found address %s for ingress %s/%s", address, ns, ingName)
|
||||
return true, nil
|
||||
})
|
||||
return address, err
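WaitForIngressAddress now only swallows retryable API errors (via testutils.IsRetryableAPIError) and logs the namespace together with the ingress name. A small sketch of the same poll-and-retry shape, independent of the jig; the lookup and retryable functions are stand-ins:

package pollexample

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForAddress keeps polling while lookup returns a retryable error or no
// result, and gives up on a hard error or on timeout.
func waitForAddress(lookup func() (string, error), retryable func(error) bool, timeout time.Duration) (string, error) {
	var address string
	err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		addr, err := lookup()
		if err != nil || addr == "" {
			if err != nil && !retryable(err) {
				return false, err // hard failure, stop polling
			}
			return false, nil // keep polling
		}
		address = addr
		return true, nil
	})
	if err != nil {
		return "", fmt.Errorf("address not assigned within %v: %v", timeout, err)
	}
	return address, nil
}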
|
||||
@ -1333,7 +1416,9 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st
|
||||
}
|
||||
for _, p := range rules.IngressRuleValue.HTTP.Paths {
|
||||
if waitForNodePort {
|
||||
if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal)); err != nil {
|
||||
nodePort := int(p.Backend.ServicePort.IntVal)
|
||||
if err := j.pollServiceNodePort(ing.Namespace, p.Backend.ServiceName, nodePort); err != nil {
|
||||
j.Logger.Infof("Error in waiting for nodeport %d on service %v/%v: %s", nodePort, ing.Namespace, p.Backend.ServiceName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -1349,7 +1434,9 @@ func (j *IngressTestJig) pollIngressWithCert(ing *extensions.Ingress, address st
|
||||
}
|
||||
|
||||
func (j *IngressTestJig) WaitForIngress(waitForNodePort bool) {
|
||||
j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, LoadBalancerPollTimeout)
|
||||
if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, LoadBalancerPollTimeout); err != nil {
|
||||
Failf("error in waiting for ingress to get an address: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// WaitForGivenIngressWithTimeout waits till the ingress acquires an IP,
|
||||
@ -1363,7 +1450,6 @@ func (j *IngressTestJig) WaitForGivenIngressWithTimeout(ing *extensions.Ingress,
|
||||
if err != nil {
|
||||
return fmt.Errorf("Ingress failed to acquire an IP address within %v", timeout)
|
||||
}
|
||||
j.Logger.Infof("Found address %v for ingress %v", address, ing.Name)
|
||||
|
||||
var knownHosts []string
|
||||
var cert []byte
|
||||
@ -1385,7 +1471,6 @@ func (j *IngressTestJig) WaitForIngressWithCert(waitForNodePort bool, knownHosts
|
||||
if err != nil {
|
||||
return fmt.Errorf("Ingress failed to acquire an IP address within %v", LoadBalancerPollTimeout)
|
||||
}
|
||||
j.Logger.Infof("Found address %v for ingress %v", address, j.Ingress.Name)
|
||||
|
||||
return j.pollIngressWithCert(j.Ingress, address, knownHosts, cert, waitForNodePort, LoadBalancerPollTimeout)
|
||||
}
|
||||
@ -1427,10 +1512,22 @@ func (j *IngressTestJig) GetDefaultBackendNodePort() (int32, error) {
|
||||
// by default, so retrieve its nodePort if includeDefaultBackend is true.
|
||||
func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []string {
|
||||
nodePorts := []string{}
|
||||
svcPorts := j.GetServicePorts(includeDefaultBackend)
|
||||
for _, svcPort := range svcPorts {
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(svcPort.NodePort)))
|
||||
}
|
||||
return nodePorts
|
||||
}
|
||||
|
||||
// GetIngressNodePorts returns related backend services' svcPorts.
|
||||
// Current GCE ingress controller allows traffic to the default HTTP backend
|
||||
// by default, so retrieve its nodePort if includeDefaultBackend is true.
|
||||
func (j *IngressTestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort {
|
||||
svcPorts := make(map[string]v1.ServicePort)
|
||||
if includeDefaultBackend {
|
||||
defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(defaultSvc.Spec.Ports[0].NodePort)))
|
||||
svcPorts[defaultBackendName] = defaultSvc.Spec.Ports[0]
|
||||
}
|
||||
|
||||
backendSvcs := []string{}
|
||||
@ -1445,9 +1542,9 @@ func (j *IngressTestJig) GetIngressNodePorts(includeDefaultBackend bool) []strin
|
||||
for _, svcName := range backendSvcs {
|
||||
svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodePorts = append(nodePorts, strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
|
||||
svcPorts[svcName] = svc.Spec.Ports[0]
|
||||
}
|
||||
return nodePorts
|
||||
return svcPorts
|
||||
}
|
||||
|
||||
// ConstructFirewallForIngress returns the expected GCE firewall rule for the ingress resource
|
||||
@ -1524,14 +1621,16 @@ func (cont *NginxIngressController) Init() {
|
||||
Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
|
||||
}
|
||||
|
||||
func GenerateReencryptionIngressSpec() *extensions.Ingress {
|
||||
func generateBacksideHTTPSIngressSpec(ns string) *extensions.Ingress {
|
||||
return &extensions.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-reencryption",
|
||||
Name: "echoheaders-https",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: extensions.IngressSpec{
|
||||
// Note kubemci requires a default backend.
|
||||
Backend: &extensions.IngressBackend{
|
||||
ServiceName: "echoheaders-reencryption",
|
||||
ServiceName: "echoheaders-https",
|
||||
ServicePort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 443,
|
||||
@ -1541,10 +1640,10 @@ func GenerateReencryptionIngressSpec() *extensions.Ingress {
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateReencryptionServiceSpec() *v1.Service {
|
||||
func generateBacksideHTTPSServiceSpec() *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-reencryption",
|
||||
Name: "echoheaders-https",
|
||||
Annotations: map[string]string{
|
||||
ServiceApplicationProtocolKey: `{"my-https-port":"HTTPS"}`,
|
||||
},
|
||||
@ -1557,33 +1656,33 @@ func GenerateReencryptionServiceSpec() *v1.Service {
|
||||
TargetPort: intstr.FromString("echo-443"),
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"app": "echoheaders-reencryption",
|
||||
"app": "echoheaders-https",
|
||||
},
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func GenerateReencryptionDeploymentSpec() *extensions.Deployment {
|
||||
func generateBacksideHTTPSDeploymentSpec() *extensions.Deployment {
|
||||
return &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "echoheaders-reencryption",
|
||||
Name: "echoheaders-https",
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
|
||||
"app": "echoheaders-reencryption",
|
||||
"app": "echoheaders-https",
|
||||
}},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"app": "echoheaders-reencryption",
|
||||
"app": "echoheaders-https",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "echoheaders-reencryption",
|
||||
Image: "k8s.gcr.io/echoserver:1.9",
|
||||
Name: "echoheaders-https",
|
||||
Image: "k8s.gcr.io/echoserver:1.10",
|
||||
Ports: []v1.ContainerPort{{
|
||||
ContainerPort: 8443,
|
||||
Name: "echo-443",
|
||||
@ -1596,26 +1695,35 @@ func GenerateReencryptionDeploymentSpec() *extensions.Deployment {
|
||||
}
|
||||
}
|
||||
|
||||
func CreateReencryptionIngress(cs clientset.Interface, namespace string) (*extensions.Deployment, *v1.Service, *extensions.Ingress, error) {
|
||||
deployCreated, err := cs.ExtensionsV1beta1().Deployments(namespace).Create(GenerateReencryptionDeploymentSpec())
|
||||
// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
|
||||
func (j *IngressTestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*extensions.Deployment, *v1.Service, *extensions.Ingress, error) {
|
||||
deployCreated, err := cs.ExtensionsV1beta1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
svcCreated, err := cs.CoreV1().Services(namespace).Create(GenerateReencryptionServiceSpec())
|
||||
svcCreated, err := cs.CoreV1().Services(namespace).Create(generateBacksideHTTPSServiceSpec())
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
ingCreated, err := cs.ExtensionsV1beta1().Ingresses(namespace).Create(GenerateReencryptionIngressSpec())
|
||||
ingToCreate := generateBacksideHTTPSIngressSpec(namespace)
|
||||
if staticIPName != "" {
|
||||
if ingToCreate.Annotations == nil {
|
||||
ingToCreate.Annotations = map[string]string{}
|
||||
}
|
||||
ingToCreate.Annotations[IngressStaticIPKey] = staticIPName
|
||||
}
|
||||
ingCreated, err := j.runCreate(ingToCreate)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
return deployCreated, svcCreated, ingCreated, nil
|
||||
}
|
||||
|
||||
func CleanupReencryptionIngress(cs clientset.Interface, deploy *extensions.Deployment, svc *v1.Service, ing *extensions.Ingress) []error {
|
||||
// DeleteTestResource deletes given deployment, service and ingress.
|
||||
func (j *IngressTestJig) DeleteTestResource(cs clientset.Interface, deploy *extensions.Deployment, svc *v1.Service, ing *extensions.Ingress) []error {
|
||||
var errs []error
|
||||
if ing != nil {
|
||||
if err := cs.ExtensionsV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
|
||||
if err := j.runDelete(ing); err != nil {
|
||||
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
|
||||
}
|
||||
}
|
||||
|
11
vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
generated
vendored
@ -210,6 +210,17 @@ func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.D
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed.
func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
	return wait.Poll(Poll, timeout, func() (bool, error) {
		_, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
		if errors.IsNotFound(err) {
			return true, nil
		}
		return false, err
	})
}
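A brief sketch of where the new helper fits: delete a Job with foreground propagation and then wait until the object itself is gone before the test proceeds. The glue function and the two-minute timeout are illustrative assumptions:

package jobexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deleteJobAndWait removes the Job's pods first via foreground propagation and
// then confirms the Job object has disappeared.
func deleteJobAndWait(c clientset.Interface, ns, name string) error {
	policy := metav1.DeletePropagationForeground
	if err := c.BatchV1().Jobs(ns).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &policy}); err != nil {
		return err
	}
	return framework.WaitForJobGone(c, ns, name, 2*time.Minute)
}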
|
||||
|
||||
// CheckForAllJobPodsRunning uses c to check in the Job named jobName in ns is running. If the returned error is not
|
||||
// nil the returned bool is true if the Job is running.
|
||||
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
|
||||
|
23
vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go
generated
vendored
@ -97,10 +97,11 @@ func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletM
|
||||
return kubeletMetrics, nil
|
||||
}
|
||||
|
||||
// GetKubeletLatencyMetrics gets all latency related kubelet metrics. Note that the KubeletMetrcis
|
||||
// passed in should not contain subsystem prefix.
|
||||
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
|
||||
latencyMethods := sets.NewString(
|
||||
// GetDefaultKubeletLatencyMetrics calls GetKubeletLatencyMetrics with a set of default metricNames
|
||||
// identifying common latency metrics.
|
||||
// Note that the KubeletMetrics passed in should not contain subsystem prefix.
|
||||
func GetDefaultKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
|
||||
latencyMetricNames := sets.NewString(
|
||||
kubeletmetrics.PodWorkerLatencyKey,
|
||||
kubeletmetrics.PodWorkerStartLatencyKey,
|
||||
kubeletmetrics.PodStartLatencyKey,
|
||||
@ -109,13 +110,15 @@ func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
|
||||
kubeletmetrics.PodWorkerStartLatencyKey,
|
||||
kubeletmetrics.PLEGRelistLatencyKey,
|
||||
)
|
||||
return GetKubeletMetrics(ms, latencyMethods)
|
||||
return GetKubeletLatencyMetrics(ms, latencyMetricNames)
|
||||
}
|
||||
|
||||
func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLatencyMetrics {
|
||||
// GetKubeletLatencyMetrics filters ms to include only those contained in the metricNames set,
|
||||
// then constructs a KubeletLatencyMetrics list based on the samples associated with those metrics.
|
||||
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics, filterMetricNames sets.String) KubeletLatencyMetrics {
|
||||
var latencyMetrics KubeletLatencyMetrics
|
||||
for method, samples := range ms {
|
||||
if !methods.Has(method) {
|
||||
for name, samples := range ms {
|
||||
if !filterMetricNames.Has(name) {
|
||||
continue
|
||||
}
|
||||
for _, sample := range samples {
|
||||
@ -131,7 +134,7 @@ func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLa
|
||||
|
||||
latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{
|
||||
Operation: operation,
|
||||
Method: method,
|
||||
Method: name,
|
||||
Quantile: quantile,
|
||||
Latency: time.Duration(int64(latency)) * time.Microsecond,
|
||||
})
|
||||
@ -265,7 +268,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
|
||||
if err != nil {
|
||||
return KubeletLatencyMetrics{}, err
|
||||
}
|
||||
latencyMetrics := GetKubeletLatencyMetrics(ms)
|
||||
latencyMetrics := GetDefaultKubeletLatencyMetrics(ms)
|
||||
sort.Sort(latencyMetrics)
|
||||
var badMetrics KubeletLatencyMetrics
|
||||
logFunc("\nLatency metrics for node %v", nodeName)
|
||||
|
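The rename makes GetKubeletLatencyMetrics a generic filter over a set of metric names, with GetDefaultKubeletLatencyMetrics supplying the usual latency keys. A sketch of that filtering idea using the same sets helper; the metric names are placeholders and plain float64 slices stand in for the Prometheus sample type:

package metricsexample

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// filterByName keeps only the samples whose metric name is in the allow set,
// which is the core of what the renamed filter does with the KubeletMetrics map.
func filterByName(samples map[string][]float64, allow sets.String) map[string][]float64 {
	out := map[string][]float64{}
	for name, s := range samples {
		if !allow.Has(name) {
			continue
		}
		out[name] = s
	}
	return out
}

func demo() {
	// Hypothetical sample data keyed by metric name.
	samples := map[string][]float64{
		"pod_start_latency": {0.12, 0.30},
		"unrelated_counter": {42},
	}
	allow := sets.NewString("pod_start_latency")
	fmt.Println(filterByName(samples, allow)) // only pod_start_latency survives
}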
117
vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go
generated
vendored
@ -32,6 +32,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
schedulermetric "k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||
"k8s.io/kubernetes/pkg/util/system"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
@ -43,7 +44,6 @@ const (
|
||||
// NodeStartupThreshold is a rough estimate of the time allocated for a pod to start on a node.
|
||||
NodeStartupThreshold = 4 * time.Second
|
||||
|
||||
podStartupThreshold time.Duration = 5 * time.Second
|
||||
// We are setting 1s threshold for apicalls even in small clusters to avoid flakes.
|
||||
// The problem is that if long GC is happening in small clusters (where we have e.g.
|
||||
// 1-core master machines) and tests are pretty short, it may consume significant
|
||||
@ -131,6 +131,8 @@ func (m *MetricsForE2E) SummaryKind() string {
|
||||
return "MetricsForE2E"
|
||||
}
|
||||
|
||||
var SchedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubsystem + "_" + schedulermetric.SchedulingLatencyName)
|
||||
|
||||
var InterestingApiServerMetrics = []string{
|
||||
"apiserver_request_count",
|
||||
"apiserver_request_latencies_summary",
|
||||
@ -188,7 +190,11 @@ type LatencyMetric struct {
|
||||
}
|
||||
|
||||
type PodStartupLatency struct {
|
||||
Latency LatencyMetric `json:"latency"`
|
||||
CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"`
|
||||
ScheduleToRunLatency LatencyMetric `json:"scheduleToRunLatency"`
|
||||
RunToWatchLatency LatencyMetric `json:"runToWatchLatency"`
|
||||
ScheduleToWatchLatency LatencyMetric `json:"scheduleToWatchLatency"`
|
||||
E2ELatency LatencyMetric `json:"e2eLatency"`
|
||||
}
|
||||
|
||||
func (l *PodStartupLatency) SummaryKind() string {
|
||||
@ -203,21 +209,26 @@ func (l *PodStartupLatency) PrintJSON() string {
|
||||
return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
|
||||
}
|
||||
|
||||
type SchedulingLatency struct {
|
||||
Scheduling LatencyMetric `json:"scheduling"`
|
||||
Binding LatencyMetric `json:"binding"`
|
||||
Total LatencyMetric `json:"total"`
|
||||
type SchedulingMetrics struct {
|
||||
PredicateEvaluationLatency LatencyMetric `json:"predicateEvaluationLatency"`
|
||||
PriorityEvaluationLatency LatencyMetric `json:"priorityEvaluationLatency"`
|
||||
PreemptionEvaluationLatency LatencyMetric `json:"preemptionEvaluationLatency"`
|
||||
BindingLatency LatencyMetric `json:"bindingLatency"`
|
||||
ThroughputAverage float64 `json:"throughputAverage"`
|
||||
ThroughputPerc50 float64 `json:"throughputPerc50"`
|
||||
ThroughputPerc90 float64 `json:"throughputPerc90"`
|
||||
ThroughputPerc99 float64 `json:"throughputPerc99"`
|
||||
}
|
||||
|
||||
func (l *SchedulingLatency) SummaryKind() string {
|
||||
return "SchedulingLatency"
|
||||
func (l *SchedulingMetrics) SummaryKind() string {
|
||||
return "SchedulingMetrics"
|
||||
}
|
||||
|
||||
func (l *SchedulingLatency) PrintHumanReadable() string {
|
||||
func (l *SchedulingMetrics) PrintHumanReadable() string {
|
||||
return PrettyPrintJSON(l)
|
||||
}
|
||||
|
||||
func (l *SchedulingLatency) PrintJSON() string {
|
||||
func (l *SchedulingMetrics) PrintJSON() string {
|
||||
return PrettyPrintJSON(l)
|
||||
}
|
||||
|
||||
@ -398,17 +409,17 @@ func HighLatencyRequests(c clientset.Interface, nodeCount int) (int, *APIRespons
|
||||
return badMetrics, metrics, nil
|
||||
}
|
||||
|
||||
// Verifies whether 50, 90 and 99th percentiles of PodStartupLatency are
|
||||
// within the threshold.
|
||||
func VerifyPodStartupLatency(latency *PodStartupLatency) error {
|
||||
if latency.Latency.Perc50 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
|
||||
// Verifies whether 50, 90 and 99th percentiles of a latency metric are
|
||||
// within the expected threshold.
|
||||
func VerifyLatencyWithinThreshold(threshold, actual LatencyMetric, metricName string) error {
|
||||
if actual.Perc50 > threshold.Perc50 {
|
||||
return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
|
||||
}
|
||||
if latency.Latency.Perc90 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.Latency.Perc90)
|
||||
if actual.Perc90 > threshold.Perc90 {
|
||||
return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
|
||||
}
|
||||
if latency.Latency.Perc99 > podStartupThreshold {
|
||||
return fmt.Errorf("too high pod startup latency 99th percentile: %v", latency.Latency.Perc99)
|
||||
if actual.Perc99 > threshold.Perc99 {
|
||||
return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
|
||||
}
|
||||
return nil
|
||||
}
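With the threshold generalized from the single pod-startup constant to a caller-supplied LatencyMetric, a check reads like the sketch below; the 5s/8s/10s budget is illustrative, not the suite's default:

package latencyexample

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// checkPodStartup is hypothetical: it compares measured pod-startup percentiles
// against an explicit per-percentile budget instead of one flat constant.
func checkPodStartup(measured framework.LatencyMetric) error {
	threshold := framework.LatencyMetric{
		Perc50: 5 * time.Second,
		Perc90: 8 * time.Second,
		Perc99: 10 * time.Second,
	}
	return framework.VerifyLatencyWithinThreshold(threshold, measured, "pod startup")
}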
|
||||
@ -435,27 +446,29 @@ func getMetrics(c clientset.Interface) (string, error) {
|
||||
return string(body), nil
|
||||
}
|
||||
|
||||
// Retrieves scheduler metrics information.
|
||||
func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
result := SchedulingLatency{}
|
||||
// Sends REST request to kube scheduler metrics
|
||||
func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error) {
|
||||
opUpper := strings.ToUpper(op)
|
||||
if opUpper != "GET" && opUpper != "DELETE" {
|
||||
return "", fmt.Errorf("Unknown REST request")
|
||||
}
|
||||
|
||||
// Check if master Node is registered
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
ExpectNoError(err)
|
||||
|
||||
var data string
|
||||
var masterRegistered = false
|
||||
for _, node := range nodes.Items {
|
||||
if system.IsMasterNode(node.Name) {
|
||||
masterRegistered = true
|
||||
}
|
||||
}
|
||||
|
||||
var responseText string
|
||||
if masterRegistered {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
var rawData []byte
|
||||
rawData, err = c.CoreV1().RESTClient().Get().
|
||||
body, err := c.CoreV1().RESTClient().Verb(opUpper).
|
||||
Context(ctx).
|
||||
Namespace(metav1.NamespaceSystem).
|
||||
Resource("pods").
|
||||
@ -465,51 +478,65 @@ func getSchedulingLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
Do().Raw()
|
||||
|
||||
ExpectNoError(err)
|
||||
data = string(rawData)
|
||||
responseText = string(body)
|
||||
} else {
|
||||
// If master is not registered fall back to old method of using SSH.
|
||||
if TestContext.Provider == "gke" {
|
||||
Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
|
||||
return nil, nil
|
||||
return "", nil
|
||||
}
|
||||
cmd := "curl http://localhost:10251/metrics"
|
||||
|
||||
cmd := "curl -X " + opUpper + " http://localhost:10251/metrics"
|
||||
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
|
||||
if err != nil || sshResult.Code != 0 {
|
||||
return &result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
|
||||
return "", fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
|
||||
}
|
||||
data = sshResult.Stdout
|
||||
responseText = sshResult.Stdout
|
||||
}
|
||||
return responseText, nil
|
||||
}
|
||||
|
||||
// Retrieves scheduler latency metrics.
|
||||
func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
|
||||
result := SchedulingMetrics{}
|
||||
data, err := sendRestRequestToScheduler(c, "GET")
|
||||
|
||||
samples, err := extractMetricSamples(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, sample := range samples {
|
||||
if sample.Metric[model.MetricNameLabel] != SchedulingLatencyMetricName {
|
||||
continue
|
||||
}
|
||||
|
||||
var metric *LatencyMetric = nil
|
||||
switch sample.Metric[model.MetricNameLabel] {
|
||||
case "scheduler_scheduling_algorithm_latency_microseconds":
|
||||
metric = &result.Scheduling
|
||||
case "scheduler_binding_latency_microseconds":
|
||||
metric = &result.Binding
|
||||
case "scheduler_e2e_scheduling_latency_microseconds":
|
||||
metric = &result.Total
|
||||
switch sample.Metric[schedulermetric.OperationLabel] {
|
||||
case schedulermetric.PredicateEvaluation:
|
||||
metric = &result.PredicateEvaluationLatency
|
||||
case schedulermetric.PriorityEvaluation:
|
||||
metric = &result.PriorityEvaluationLatency
|
||||
case schedulermetric.PreemptionEvaluation:
|
||||
metric = &result.PreemptionEvaluationLatency
|
||||
case schedulermetric.Binding:
|
||||
metric = &result.BindingLatency
|
||||
}
|
||||
if metric == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
latency := sample.Value
|
||||
quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
|
||||
setQuantile(metric, quantile, time.Duration(int64(float64(sample.Value)*float64(time.Second))))
|
||||
}
|
||||
return &result, nil
|
||||
}
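The key change in the sample loop: the scheduler's consolidated summary metric reports seconds, so the value is scaled by time.Second instead of being treated as microseconds, and the operation label selects which LatencyMetric field to fill. A standalone sketch of that conversion; the label and value are made up:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Hypothetical Prometheus summary sample: quantile label plus a value in seconds.
	quantileLabel := "0.99"
	valueSeconds := 0.035

	quantile, err := strconv.ParseFloat(quantileLabel, 64)
	if err != nil {
		panic(err)
	}
	latency := time.Duration(int64(valueSeconds * float64(time.Second)))
	fmt.Printf("q%.2f scheduling latency: %v\n", quantile, latency) // q0.99 scheduling latency: 35ms
}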
|
||||
|
||||
// Verifies (currently just by logging them) the scheduling latencies.
|
||||
func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
|
||||
latency, err := getSchedulingLatency(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -517,6 +544,14 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingLatency, error) {
|
||||
return latency, nil
|
||||
}
|
||||
|
||||
func ResetSchedulerMetrics(c clientset.Interface) error {
	responseText, err := sendRestRequestToScheduler(c, "DELETE")
	if err != nil || responseText != "metrics reset\n" {
		return fmt.Errorf("Unexpected response: %q", responseText)
	}
	return nil
}
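Together with the GET path, this gives a measure-reset-measure loop. A hedged sketch of how a load test might wrap a workload with it; the wrapper function is hypothetical:

package schedulerexample

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// measureScheduling clears the scheduler's accumulated latency metrics, runs
// the workload under test, then collects a fresh summary.
func measureScheduling(c clientset.Interface, workload func() error) (*framework.SchedulingMetrics, error) {
	if err := framework.ResetSchedulerMetrics(c); err != nil {
		return nil, err
	}
	if err := workload(); err != nil {
		return nil, err
	}
	return framework.VerifySchedulerLatency(c)
}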
|
||||
|
||||
func PrettyPrintJSON(metrics interface{}) string {
|
||||
output := &bytes.Buffer{}
|
||||
if err := json.NewEncoder(output).Encode(metrics); err != nil {
|
||||
|
5
vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go
generated
vendored
@ -40,7 +40,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -403,7 +402,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName, hostname stri
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -447,7 +446,7 @@ func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: testPodName,
|
||||
|
125
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@ -24,11 +24,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
func EtcdUpgrade(target_storage, target_version string) error {
|
||||
@ -67,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
|
||||
os.Environ(),
|
||||
"TEST_ETCD_VERSION="+target_version,
|
||||
"STORAGE_BACKEND="+target_storage,
|
||||
"TEST_ETCD_IMAGE=3.2.14")
|
||||
"TEST_ETCD_IMAGE=3.2.18-0")
|
||||
|
||||
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
|
||||
return err
|
||||
@ -82,11 +78,11 @@ func ingressUpgradeGCE(isUpgrade bool) error {
|
||||
command = fmt.Sprintf("sudo sed -i -re 's|(image:)(.*)|\\1 %s|' /etc/kubernetes/manifests/glbc.manifest", targetImage)
|
||||
} else {
|
||||
// Upgrade to latest HEAD image.
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:latest/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 gcr.io\\/k8s-ingress-image-push\\/ingress-gce-e2e-glbc-amd64:master/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
}
|
||||
} else {
|
||||
// Downgrade to latest release image.
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/google_containers\\/glbc:0.9.7/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
command = "sudo sed -i -re 's/(image:)(.*)/\\1 k8s.gcr.io\\/ingress-gce-glbc-amd64:v1.1.1/' /etc/kubernetes/manifests/glbc.manifest"
|
||||
}
|
||||
// Kubelet should restart glbc automatically.
|
||||
sshResult, err := NodeExec(GetMasterHost(), command)
|
||||
@ -107,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
|
||||
env = append(env,
|
||||
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
|
||||
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
|
||||
"TEST_ETCD_IMAGE=3.2.14")
|
||||
"TEST_ETCD_IMAGE=3.2.18-0")
|
||||
} else {
|
||||
// In e2e tests, we skip the confirmation prompt about
|
||||
// implicit etcd upgrades to simulate the user entering "y".
|
||||
@ -119,17 +115,36 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func locationParamGKE() string {
	if TestContext.CloudConfig.MultiMaster {
		// GKE Regional Clusters are being tested.
		return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
	}
	return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
}

func appendContainerCommandGroupIfNeeded(args []string) []string {
	if TestContext.CloudConfig.Region != "" {
		// TODO(wojtek-t): Get rid of it once Regional Clusters go to GA.
		return append([]string{"beta"}, args...)
	}
	return args
}
|
||||
|
||||
func masterUpgradeGKE(v string) error {
|
||||
Logf("Upgrading master to %q", v)
|
||||
_, _, err := RunCmd("gcloud", "container",
|
||||
args := []string{
|
||||
"container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
locationParamGKE(),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
"--master",
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
"--quiet")
|
||||
"--quiet",
|
||||
}
|
||||
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
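The masterUpgradeGKE hunk above rebuilds the gcloud invocation as an args slice so that the "beta" command group can be prepended whenever a region is configured (regional GKE clusters). A minimal, self-contained sketch of that prepend-and-run pattern, using only the standard library; the cluster name, version, and region below are hypothetical placeholders, not values from this diff:

package main

import (
	"fmt"
	"os/exec"
)

// prependGroupIfRegional mirrors the idea of appendContainerCommandGroupIfNeeded:
// regional clusters are driven through the "beta" gcloud command group.
func prependGroupIfRegional(region string, args []string) []string {
	if region != "" {
		return append([]string{"beta"}, args...)
	}
	return args
}

func main() {
	args := []string{
		"container", "clusters", "upgrade", "example-cluster",
		"--master", "--cluster-version=1.11.0", "--quiet",
	}
	args = prependGroupIfRegional("us-central1", args)
	cmd := exec.Command("gcloud", args...)
	fmt.Println(cmd.Args) // [gcloud beta container clusters upgrade example-cluster ...]
}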
@ -190,16 +205,7 @@ func NodeUpgrade(f *Framework, v string, img string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for it to complete and validate nodes are healthy.
|
||||
//
|
||||
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
|
||||
// GKE; the operation shouldn't return until they all are.
|
||||
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
|
||||
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return waitForNodesReadyAfterUpgrade(f)
|
||||
}
|
||||
|
||||
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
|
||||
@ -208,9 +214,20 @@ func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, en
|
||||
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
|
||||
return err
|
||||
}
|
||||
return waitForNodesReadyAfterUpgrade(f)
|
||||
}
|
||||
|
||||
func waitForNodesReadyAfterUpgrade(f *Framework) error {
|
||||
// Wait for it to complete and validate nodes are healthy.
|
||||
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
|
||||
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
|
||||
//
|
||||
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
|
||||
// GKE; the operation shouldn't return until they all are.
|
||||
numNodes, err := NumberOfRegisteredNodes(f.ClientSet)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't detect number of nodes")
|
||||
}
|
||||
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
|
||||
if _, err := CheckNodesReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -235,7 +252,7 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
"container",
|
||||
"clusters",
|
||||
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
|
||||
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
|
||||
locationParamGKE(),
|
||||
"upgrade",
|
||||
TestContext.CloudConfig.Cluster,
|
||||
fmt.Sprintf("--cluster-version=%s", v),
|
||||
@ -244,7 +261,7 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
if len(img) > 0 {
|
||||
args = append(args, fmt.Sprintf("--image-type=%s", img))
|
||||
}
|
||||
_, _, err := RunCmd("gcloud", args...)
|
||||
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
@ -255,64 +272,6 @@ func nodeUpgradeGKE(v string, img string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckNodesReady waits up to nt for expect nodes accessed by c to be ready,
|
||||
// returning an error if this doesn't happen in time. It returns the names of
|
||||
// nodes it finds.
|
||||
func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
|
||||
// First, keep getting all of the nodes until we get the number we expect.
|
||||
var nodeList *v1.NodeList
|
||||
var errLast error
|
||||
start := time.Now()
|
||||
found := wait.Poll(Poll, nt, func() (bool, error) {
|
||||
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
|
||||
// knows about all of the nodes. Thus, we retry the list nodes call
|
||||
// until we get the expected number of nodes.
|
||||
nodeList, errLast = c.CoreV1().Nodes().List(metav1.ListOptions{
|
||||
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
|
||||
if errLast != nil {
|
||||
return false, nil
|
||||
}
|
||||
if len(nodeList.Items) != expect {
|
||||
errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
|
||||
expect, len(nodeList.Items), time.Since(start))
|
||||
Logf("%v", errLast)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}) == nil
|
||||
nodeNames := make([]string, len(nodeList.Items))
|
||||
for i, n := range nodeList.Items {
|
||||
nodeNames[i] = n.ObjectMeta.Name
|
||||
}
|
||||
if !found {
|
||||
return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
|
||||
expect, nt, errLast)
|
||||
}
|
||||
Logf("Successfully found %d nodes", expect)
|
||||
|
||||
// Next, ensure in parallel that all the nodes are ready. We subtract the
|
||||
// time we spent waiting above.
|
||||
timeout := nt - time.Since(start)
|
||||
result := make(chan bool, len(nodeList.Items))
|
||||
for _, n := range nodeNames {
|
||||
n := n
|
||||
go func() { result <- WaitForNodeToBeReady(c, n, timeout) }()
|
||||
}
|
||||
failed := false
|
||||
// TODO(mbforbes): Change to `for range` syntax once we support only Go
|
||||
// >= 1.4.
|
||||
for i := range nodeList.Items {
|
||||
_ = i
|
||||
if !<-result {
|
||||
failed = true
|
||||
}
|
||||
}
|
||||
if failed {
|
||||
return nodeNames, fmt.Errorf("at least one node failed to be ready")
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
|
||||
|
||||
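The removed CheckNodesReady above first polls until the expected number of schedulable nodes is listed and then verifies each node's readiness in parallel. A stdlib-only sketch of that fan-out/fan-in step; isReady is a hypothetical stand-in for WaitForNodeToBeReady:

package main

import (
	"fmt"
	"strings"
)

func allReady(nodeNames []string, isReady func(string) bool) error {
	result := make(chan bool, len(nodeNames))
	for _, n := range nodeNames {
		n := n // capture the loop variable for the goroutine
		go func() { result <- isReady(n) }()
	}
	failed := false
	for range nodeNames {
		if !<-result {
			failed = true
		}
	}
	if failed {
		return fmt.Errorf("at least one node failed to be ready")
	}
	return nil
}

func main() {
	nodes := []string{"node-1", "node-2", "node-3"}
	err := allReady(nodes, func(name string) bool {
		return !strings.HasSuffix(name, "-3") // pretend node-3 is not ready
	})
	fmt.Println(err)
}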
// MigTemplate (GCE-only) returns the name of the MIG template that the
|
||||
// nodes of the cluster use.
|
||||
func MigTemplate() (string, error) {
|
||||
|
27
vendor/k8s.io/kubernetes/test/e2e/framework/perf_util.go
generated
vendored
@ -53,22 +53,29 @@ func ApiCallToPerfData(apicalls *APIResponsiveness) *perftype.PerfData {
return perfData
}

// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
item := perftype.DataItem{
func latencyToPerfData(l LatencyMetric, name string) perftype.DataItem {
return perftype.DataItem{
Data: map[string]float64{
"Perc50": float64(latency.Latency.Perc50) / 1000000, // us -> ms
"Perc90": float64(latency.Latency.Perc90) / 1000000,
"Perc99": float64(latency.Latency.Perc99) / 1000000,
"Perc100": float64(latency.Latency.Perc100) / 1000000,
"Perc50": float64(l.Perc50) / 1000000, // us -> ms
"Perc90": float64(l.Perc90) / 1000000,
"Perc99": float64(l.Perc99) / 1000000,
"Perc100": float64(l.Perc100) / 1000000,
},
Unit: "ms",
Labels: map[string]string{
"Metric": "pod_startup",
"Metric": name,
},
}
perfData.DataItems = append(perfData.DataItems, item)
}

// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *perftype.PerfData {
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.CreateToScheduleLatency, "create_to_schedule"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToRunLatency, "schedule_to_run"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.RunToWatchLatency, "run_to_watch"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToWatchLatency, "schedule_to_watch"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.E2ELatency, "pod_startup"))
return perfData
}

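The perf_util.go change above factors the percentile conversion into a single latencyToPerfData helper so several pod-startup phases can be reported with one code path. A hedged, stdlib-only sketch of the same shape; the local struct types only mirror, and are not, the real perftype definitions, and they assume latencies are stored as time.Duration values:

package main

import (
	"fmt"
	"time"
)

type latencyMetric struct {
	Perc50, Perc90, Perc99, Perc100 time.Duration
}

type dataItem struct {
	Data   map[string]float64
	Unit   string
	Labels map[string]string
}

// latencyToItem turns one latency metric into a labeled data item with all
// percentiles expressed in milliseconds.
func latencyToItem(l latencyMetric, name string) dataItem {
	toMs := func(d time.Duration) float64 { return float64(d) / float64(time.Millisecond) }
	return dataItem{
		Data: map[string]float64{
			"Perc50":  toMs(l.Perc50),
			"Perc90":  toMs(l.Perc90),
			"Perc99":  toMs(l.Perc99),
			"Perc100": toMs(l.Perc100),
		},
		Unit:   "ms",
		Labels: map[string]string{"Metric": name},
	}
}

func main() {
	m := latencyMetric{Perc50: 120 * time.Millisecond, Perc100: 900 * time.Millisecond}
	fmt.Printf("%+v\n", latencyToItem(m, "pod_startup"))
}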
49
vendor/k8s.io/kubernetes/test/e2e/framework/profile_gatherer.go
generated
vendored
@ -19,7 +19,6 @@ package framework
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
@ -69,36 +68,44 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
|
||||
}
|
||||
// Write the data to a temp file.
|
||||
var tmpfile *os.File
|
||||
tmpfile, err = ioutil.TempFile("", "apiserver-profile")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create temp file for profile data: %v", err)
|
||||
}
|
||||
defer os.Remove(tmpfile.Name())
|
||||
if _, err := tmpfile.Write([]byte(sshResult.Stdout)); err != nil {
|
||||
return fmt.Errorf("Failed to write temp file with profile data: %v", err)
|
||||
}
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
return fmt.Errorf("Failed to close temp file: %v", err)
|
||||
}
|
||||
// Create a graph from the data and write it to a pdf file.
|
||||
var cmd *exec.Cmd
|
||||
|
||||
var profilePrefix string
|
||||
switch {
|
||||
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
|
||||
case kind == "heap":
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", tmpfile.Name())
|
||||
profilePrefix = "ApiserverMemoryProfile_"
|
||||
case strings.HasPrefix(kind, "profile"):
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", tmpfile.Name())
|
||||
profilePrefix = "ApiserverCPUProfile_"
|
||||
default:
|
||||
return fmt.Errorf("Unknown profile kind provided: %s", kind)
|
||||
}
|
||||
|
||||
// Write the data to a file.
|
||||
rawprofilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pprof")
|
||||
rawprofile, err := os.Create(rawprofilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
|
||||
}
|
||||
defer rawprofile.Close()
|
||||
|
||||
if _, err := rawprofile.Write([]byte(sshResult.Stdout)); err != nil {
|
||||
return fmt.Errorf("Failed to write file with profile data: %v", err)
|
||||
}
|
||||
if err := rawprofile.Close(); err != nil {
|
||||
return fmt.Errorf("Failed to close file: %v", err)
|
||||
}
|
||||
// Create a graph from the data and write it to a pdf file.
|
||||
var cmd *exec.Cmd
|
||||
switch {
|
||||
// TODO: Support other profile kinds if needed (e.g inuse_space, alloc_objects, mutex, etc)
|
||||
case kind == "heap":
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofile.Name())
|
||||
case strings.HasPrefix(kind, "profile"):
|
||||
cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawprofile.Name())
|
||||
default:
|
||||
return fmt.Errorf("Unknown profile kind provided: %s", kind)
|
||||
}
|
||||
outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
|
||||
var outfile *os.File
|
||||
outfile, err = os.Create(outfilePath)
|
||||
outfile, err := os.Create(outfilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create file for the profile graph: %v", err)
|
||||
}
|
||||
|
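The profile_gatherer.go hunk above reorders the flow so the raw profile is persisted as a .pprof file before the PDF graph is rendered from it. A minimal sketch of that save-then-render sequence; the paths and profile payload are invented for illustration, and producing a PDF assumes graphviz is installed:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

func saveAndRenderProfile(dir, baseName string, raw []byte) error {
	// Keep the raw profile data first so it survives even if rendering fails.
	rawPath := filepath.Join(dir, baseName+".pprof")
	if err := os.WriteFile(rawPath, raw, 0644); err != nil {
		return fmt.Errorf("failed to write raw profile: %v", err)
	}
	pdfPath := filepath.Join(dir, baseName+".pdf")
	out, err := os.Create(pdfPath)
	if err != nil {
		return fmt.Errorf("failed to create pdf file: %v", err)
	}
	defer out.Close()
	// Render the saved profile into a PDF graph.
	cmd := exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawPath)
	cmd.Stdout = out
	return cmd.Run()
}

func main() {
	_ = saveAndRenderProfile(os.TempDir(), "ApiserverCPUProfile_example", []byte("raw pprof data"))
}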
1
vendor/k8s.io/kubernetes/test/e2e/framework/psp_util.go
generated
vendored
@ -71,6 +71,7 @@ func PrivilegedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
|
||||
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
AllowedUnsafeSysctls: []string{"*"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
127
vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"google.golang.org/api/googleapi"
|
||||
"k8s.io/api/core/v1"
|
||||
@ -35,7 +36,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
@ -678,6 +678,22 @@ func DeletePDWithRetry(diskName string) error {
|
||||
return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
|
||||
}
|
||||
|
||||
func newAWSClient(zone string) *ec2.EC2 {
|
||||
var cfg *aws.Config
|
||||
|
||||
if zone == "" {
|
||||
zone = TestContext.CloudConfig.Zone
|
||||
}
|
||||
if zone == "" {
|
||||
glog.Warning("No AWS zone configured!")
|
||||
cfg = nil
|
||||
} else {
|
||||
region := zone[:len(zone)-1]
|
||||
cfg = &aws.Config{Region: aws.String(region)}
|
||||
}
|
||||
return ec2.New(session.New(), cfg)
|
||||
}
|
||||
|
||||
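newAWSClient above derives the AWS region by trimming the trailing zone letter and falls back to the default configuration when no zone is known. A tiny sketch of just that zone-to-region handling; the zone strings are examples:

package main

import "fmt"

func regionFromZone(zone string) string {
	if zone == "" {
		return "" // fall back to whatever the SDK/environment provides
	}
	return zone[:len(zone)-1] // e.g. "us-east-1a" -> "us-east-1"
}

func main() {
	fmt.Println(regionFromZone("us-east-1a"))
	fmt.Println(regionFromZone(""))
}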
func createPD(zone string) (string, error) {
|
||||
if zone == "" {
|
||||
zone = TestContext.CloudConfig.Zone
|
||||
@ -691,6 +707,14 @@ func createPD(zone string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if zone == "" && TestContext.CloudConfig.MultiZone {
|
||||
zones, err := gceCloud.GetAllZonesFromCloudProvider()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
zone, _ = zones.PopAny()
|
||||
}
|
||||
|
||||
tags := map[string]string{}
|
||||
err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
|
||||
if err != nil {
|
||||
@ -698,8 +722,7 @@ func createPD(zone string) (string, error) {
|
||||
}
|
||||
return pdName, nil
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := ec2.New(session.New())
|
||||
|
||||
client := newAWSClient(zone)
|
||||
request := &ec2.CreateVolumeInput{}
|
||||
request.AvailabilityZone = aws.String(zone)
|
||||
request.Size = aws.Int64(10)
|
||||
@ -751,7 +774,7 @@ func deletePD(pdName string) error {
|
||||
}
|
||||
return err
|
||||
} else if TestContext.Provider == "aws" {
|
||||
client := ec2.New(session.New())
|
||||
client := newAWSClient("")
|
||||
|
||||
tokens := strings.Split(pdName, "/")
|
||||
awsVolumeID := tokens[len(tokens)-1]
|
||||
@ -792,12 +815,12 @@ func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod
|
||||
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
command = "trap exit TERM; while true; do sleep 1; done"
|
||||
}
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-tester-",
|
||||
@ -833,19 +856,64 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
|
||||
return podSpec
|
||||
}
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod.
|
||||
// SELinux testing requires HostIPC and HostPID to be passed as boolean arguments.
|
||||
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
}
|
||||
podName := "security-context-" + string(uuid.NewUUID())
|
||||
fsGroup := int64(1000)
|
||||
// Returns a pod definition based on the namespace using nginx image
|
||||
func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pvc-tester-",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: "nginx",
|
||||
Ports: []v1.ContainerPort{
|
||||
{
|
||||
Name: "http-server",
|
||||
ContainerPort: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
|
||||
var volumes = make([]v1.Volume, len(pvclaims))
|
||||
for index, pvclaim := range pvclaims {
|
||||
volumename := fmt.Sprintf("volume%v", index+1)
|
||||
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
|
||||
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
|
||||
}
|
||||
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
|
||||
podSpec.Spec.Volumes = volumes
|
||||
if nodeSelector != nil {
|
||||
podSpec.Spec.NodeSelector = nodeSelector
|
||||
}
|
||||
return podSpec
|
||||
}
|
||||
|
||||
// Returns a pod definition based on the namespace. The pod references the PVC's
|
||||
// name. A slice of BASH commands can be supplied as args to be run by the pod.
|
||||
// SELinux testing requires HostIPC and HostPID to be passed as boolean arguments.
|
||||
func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
|
||||
if len(command) == 0 {
|
||||
command = "trap exit TERM; while true; do sleep 1; done"
|
||||
}
|
||||
podName := "security-context-" + string(uuid.NewUUID())
|
||||
if fsGroup == nil {
|
||||
fsGroup = func(i int64) *int64 {
|
||||
return &i
|
||||
}(1000)
|
||||
}
|
||||
podSpec := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
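The MakeSecPod hunk above turns fsGroup into a *int64 parameter and only applies the old hard-coded 1000 default when the caller passes nil. A small sketch of that nil-defaulting idiom; the helper name is invented for illustration:

package main

import "fmt"

func defaultFSGroup(fsGroup *int64) *int64 {
	if fsGroup == nil {
		v := int64(1000) // previous fixed default
		return &v
	}
	return fsGroup
}

func main() {
	fmt.Println(*defaultFSGroup(nil)) // 1000
	custom := int64(2000)
	fmt.Println(*defaultFSGroup(&custom)) // 2000
}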
@ -855,7 +923,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
|
||||
HostIPC: hostIPC,
|
||||
HostPID: hostPID,
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
FSGroup: &fsGroup,
|
||||
FSGroup: fsGroup,
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
@ -911,9 +979,8 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// create security pod with given claims
|
||||
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
|
||||
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel)
|
||||
func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||
pod := MakeNginxPod(namespace, nodeSelector, pvclaims)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
@ -931,6 +998,26 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// create security pod with given claims
|
||||
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
|
||||
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
|
||||
pod, err := client.CoreV1().Pods(namespace).Create(pod)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pod Create API error: %v", err)
|
||||
}
|
||||
// Waiting for pod to be running
|
||||
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
|
||||
}
|
||||
// get fresh pod info
|
||||
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return pod, fmt.Errorf("pod Get API error: %v", err)
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
|
||||
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
|
||||
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
|
||||
|
21
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
@ -25,13 +25,12 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
scaleclient "k8s.io/client-go/scale"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
@ -60,7 +59,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
|
||||
return &v1.ReplicationController{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicationController",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@ -85,9 +84,7 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
|
||||
|
||||
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
|
||||
// none are running, otherwise it does what a synchronous scale operation would do.
|
||||
//TODO(p0lyn0mial): remove internalClientset.
|
||||
//TODO(p0lyn0mial): update the callers.
|
||||
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
|
||||
func ScaleRCByLabels(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns string, l map[string]string, replicas uint) error {
|
||||
listOpts := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
|
||||
rcs, err := clientset.CoreV1().ReplicationControllers(ns).List(listOpts)
|
||||
if err != nil {
|
||||
@ -99,7 +96,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
|
||||
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
|
||||
for _, labelRC := range rcs.Items {
|
||||
name := labelRC.Name
|
||||
if err := ScaleRC(clientset, internalClientset, scalesGetter, ns, name, replicas, false); err != nil {
|
||||
if err := ScaleRC(clientset, scalesGetter, ns, name, replicas, false); err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := clientset.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
|
||||
@ -107,7 +104,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
|
||||
return err
|
||||
}
|
||||
if replicas == 0 {
|
||||
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
|
||||
ps, err := testutils.NewPodStore(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector), fields.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -155,12 +152,8 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
|
||||
}
|
||||
|
||||
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
|
||||
func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.Resource("replicationcontrollers"))
|
||||
}
|
||||
|
||||
func RunRC(config testutils.RCConfig) error {
|
||||
|
30
vendor/k8s.io/kubernetes/test/e2e/framework/rs_util.go
generated
vendored
@ -21,29 +21,29 @@ import (
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
|
||||
appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
type updateRsFunc func(d *extensions.ReplicaSet)
|
||||
type updateRsFunc func(d *apps.ReplicaSet)
|
||||
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*extensions.ReplicaSet, error) {
|
||||
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) {
|
||||
return testutils.UpdateReplicaSetWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
// CheckNewRSAnnotations check if the new RS's annotation is as expected
|
||||
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -59,7 +59,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
|
||||
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
|
||||
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
err := wait.Poll(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -72,7 +72,7 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
|
||||
}
|
||||
|
||||
// WaitForReplicaSetDesiredReplicas waits until the replicaset has desired number of replicas.
|
||||
func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error {
|
||||
func WaitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *apps.ReplicaSet) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
@ -88,10 +88,10 @@ func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGette
|
||||
}
|
||||
|
||||
// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
|
||||
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
|
||||
func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -104,10 +104,10 @@ func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *exte
|
||||
}
|
||||
|
||||
// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
|
||||
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
|
||||
func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *apps.ReplicaSet, targetReplicaNum int32) error {
|
||||
desiredGeneration := replicaSet.Generation
|
||||
err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
|
||||
rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -126,8 +126,8 @@ func RunReplicaSet(config testutils.ReplicaSetConfig) error {
|
||||
return testutils.RunReplicaSet(config)
|
||||
}
|
||||
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *extensions.ReplicaSet {
|
||||
return &extensions.ReplicaSet{
|
||||
func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *apps.ReplicaSet {
|
||||
return &apps.ReplicaSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "ReplicaSet",
|
||||
APIVersion: "extensions/v1beta1",
|
||||
@ -136,7 +136,7 @@ func NewReplicaSet(name, namespace string, replicas int32, podLabels map[string]
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Spec: apps.ReplicaSetSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
|
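The replica-set helpers above all follow the same wait.PollImmediate pattern: check a condition right away, then retry on an interval until it holds or a timeout expires. A stdlib-only sketch of that polling loop; the interval, timeout, and condition are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollImmediate evaluates condition immediately, then keeps retrying on an
// interval until it succeeds, returns an error, or the timeout elapses.
func pollImmediate(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	err := pollImmediate(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		// Stand-in for "observed generation and ready replicas match the spec".
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println(err)
}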
147
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
@ -94,6 +94,10 @@ const (
|
||||
// GCPMaxInstancesInInstanceGroup is the maximum number of instances supported in
|
||||
// one instance group on GCP.
|
||||
GCPMaxInstancesInInstanceGroup = 2000
|
||||
|
||||
// AffinityConfirmCount is the number of consecutive requests needed to confirm
// that affinity is enabled.
|
||||
AffinityConfirmCount = 15
|
||||
)
|
||||
|
||||
// This should match whatever the default/configured range is
|
||||
@ -211,6 +215,20 @@ func (j *ServiceTestJig) CreateExternalNameServiceOrFail(namespace string, tweak
|
||||
return result
|
||||
}
|
||||
|
||||
// CreateServiceWithServicePort creates a new Service with ServicePort.
|
||||
func (j *ServiceTestJig) CreateServiceWithServicePort(labels map[string]string, namespace string, ports []v1.ServicePort) (*v1.Service, error) {
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: j.Name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: labels,
|
||||
Ports: ports,
|
||||
},
|
||||
}
|
||||
return j.Client.CoreV1().Services(namespace).Create(service)
|
||||
}
|
||||
|
||||
func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) {
|
||||
ingressIP := ""
|
||||
svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) {
|
||||
@ -1192,26 +1210,13 @@ func ValidateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
|
||||
Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
|
||||
}
|
||||
|
||||
// StartServeHostnameService creates a replication controller that serves its hostname and a service on top of it.
|
||||
func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, port, replicas int) ([]string, string, error) {
|
||||
// StartServeHostnameService creates a replication controller that serves its
|
||||
// hostname and a service on top of it.
|
||||
func StartServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, svc *v1.Service, ns string, replicas int) ([]string, string, error) {
|
||||
podNames := make([]string, replicas)
|
||||
|
||||
name := svc.ObjectMeta.Name
|
||||
By("creating service " + name + " in namespace " + ns)
|
||||
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: int32(port),
|
||||
TargetPort: intstr.FromInt(9376),
|
||||
Protocol: "TCP",
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
})
|
||||
_, err := c.CoreV1().Services(ns).Create(svc)
|
||||
if err != nil {
|
||||
return podNames, "", err
|
||||
}
|
||||
@ -1255,8 +1260,8 @@ func StartServeHostnameService(c clientset.Interface, internalClient internalcli
|
||||
return podNames, serviceIP, nil
|
||||
}
|
||||
|
||||
func StopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
|
||||
if err := DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil {
|
||||
func StopServeHostnameService(clientset clientset.Interface, ns, name string) error {
|
||||
if err := DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil {
|
||||
@ -1368,17 +1373,17 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
|
||||
return fmt.Errorf("waiting for service to be down timed out")
|
||||
}
|
||||
|
||||
func CleanupServiceResources(c clientset.Interface, loadBalancerName, zone string) {
|
||||
func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
|
||||
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
|
||||
CleanupServiceGCEResources(c, loadBalancerName, zone)
|
||||
CleanupServiceGCEResources(c, loadBalancerName, region, zone)
|
||||
}
|
||||
|
||||
// TODO: we need to add this function with other cloud providers, if there is a need.
|
||||
}
|
||||
|
||||
func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, zone string) {
|
||||
func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, region, zone string) {
|
||||
if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
|
||||
if err := CleanupGCEResources(c, loadBalancerName, zone); err != nil {
|
||||
if err := CleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
|
||||
Logf("Still waiting for glbc to cleanup: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
@ -1450,3 +1455,97 @@ func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration
|
||||
}
|
||||
return LoadBalancerCreateTimeoutDefault
|
||||
}
|
||||
|
||||
// affinityTracker tracks the destination of a request for the affinity tests.
|
||||
type affinityTracker struct {
|
||||
hostTrace []string
|
||||
}
|
||||
|
||||
// Record the response going to a given host.
|
||||
func (at *affinityTracker) recordHost(host string) {
|
||||
at.hostTrace = append(at.hostTrace, host)
|
||||
}
|
||||
|
||||
// Check that the last count requests all went to the same host.
|
||||
func (at *affinityTracker) checkHostTrace(count int) (fulfilled, affinityHolds bool) {
|
||||
fulfilled = (len(at.hostTrace) >= count)
|
||||
if len(at.hostTrace) == 0 {
|
||||
return fulfilled, true
|
||||
}
|
||||
last := at.hostTrace[0:]
|
||||
if len(at.hostTrace)-count >= 0 {
|
||||
last = at.hostTrace[len(at.hostTrace)-count:]
|
||||
}
|
||||
host := at.hostTrace[len(at.hostTrace)-1]
|
||||
for _, h := range last {
|
||||
if h != host {
|
||||
return fulfilled, false
|
||||
}
|
||||
}
|
||||
return fulfilled, true
|
||||
}
|
||||
|
||||
func checkAffinityFailed(tracker affinityTracker, err string) {
|
||||
Logf("%v", tracker.hostTrace)
|
||||
Failf(err)
|
||||
}
|
||||
|
||||
// CheckAffinity tests whether the service affinity works as expected.
// If affinity is expected and transitionState is true, the test returns true
// once affinityConfirmCount identical responses have been observed in a row.
// If affinity is not expected, the test keeps observing until differing
// responses are seen; it returns false only when no expected response is
// observed before the timeout. If transitionState is false and shouldHold is
// true, the test fails as soon as a different host answers.
|
||||
func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, targetPort int, shouldHold, transitionState bool) bool {
|
||||
targetIpPort := net.JoinHostPort(targetIp, strconv.Itoa(targetPort))
|
||||
cmd := fmt.Sprintf(`wget -qO- http://%s/ -T 2`, targetIpPort)
|
||||
timeout := ServiceTestTimeout
|
||||
if execPod == nil {
|
||||
timeout = LoadBalancerPollTimeout
|
||||
}
|
||||
var tracker affinityTracker
|
||||
if pollErr := wait.PollImmediate(Poll, timeout, func() (bool, error) {
|
||||
if execPod != nil {
|
||||
if stdout, err := RunHostCmd(execPod.Namespace, execPod.Name, cmd); err != nil {
|
||||
Logf("Failed to get response from %s. Retry until timeout", targetIpPort)
|
||||
return false, nil
|
||||
} else {
|
||||
tracker.recordHost(stdout)
|
||||
}
|
||||
} else {
|
||||
rawResponse := jig.GetHTTPContent(targetIp, targetPort, timeout, "")
|
||||
tracker.recordHost(rawResponse.String())
|
||||
}
|
||||
trackerFulfilled, affinityHolds := tracker.checkHostTrace(AffinityConfirmCount)
|
||||
if !shouldHold && !affinityHolds {
|
||||
return true, nil
|
||||
}
|
||||
if shouldHold {
|
||||
if !transitionState && !affinityHolds {
|
||||
return true, fmt.Errorf("Affintity should hold but didn't.")
|
||||
}
|
||||
if trackerFulfilled && affinityHolds {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}); pollErr != nil {
|
||||
trackerFulfilled, _ := tracker.checkHostTrace(AffinityConfirmCount)
|
||||
if pollErr != wait.ErrWaitTimeout {
|
||||
checkAffinityFailed(tracker, pollErr.Error())
|
||||
return false
|
||||
} else {
|
||||
if !trackerFulfilled {
|
||||
checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIpPort))
|
||||
}
|
||||
if shouldHold {
|
||||
checkAffinityFailed(tracker, "Affintity should hold but didn't.")
|
||||
} else {
|
||||
checkAffinityFailed(tracker, "Affintity shouldn't hold but did.")
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
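The affinity helpers above confirm session affinity by requiring AffinityConfirmCount identical responses in a row and fail when a different backend answers while affinity should hold. A simplified, self-contained sketch of the tracker bookkeeping; the host names and confirm count are examples:

package main

import "fmt"

type tracker struct{ hostTrace []string }

// record remembers which backend answered a request.
func (t *tracker) record(host string) { t.hostTrace = append(t.hostTrace, host) }

// check returns (enough samples seen, affinity held over the last count samples).
func (t *tracker) check(count int) (fulfilled, holds bool) {
	fulfilled = len(t.hostTrace) >= count
	if len(t.hostTrace) == 0 {
		return fulfilled, true
	}
	last := t.hostTrace
	if len(t.hostTrace) > count {
		last = t.hostTrace[len(t.hostTrace)-count:]
	}
	host := last[len(last)-1]
	for _, h := range last {
		if h != host {
			return fulfilled, false
		}
	}
	return fulfilled, true
}

func main() {
	var t tracker
	for _, h := range []string{"pod-a", "pod-a", "pod-a"} {
		t.record(h)
	}
	fmt.Println(t.check(3)) // true true
}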
44
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -49,7 +49,12 @@ type TestContextType struct {
|
||||
RepoRoot string
|
||||
DockershimCheckpointDir string
|
||||
|
||||
Provider string
|
||||
// Provider identifies the infrastructure provider (gce, gke, aws)
|
||||
Provider string
|
||||
|
||||
// Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
|
||||
Tooling string
|
||||
|
||||
CloudConfig CloudConfig
|
||||
KubectlPath string
|
||||
OutputDir string
|
||||
@ -104,14 +109,16 @@ type TestContextType struct {
|
||||
DisableLogDump bool
|
||||
// Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty.
|
||||
LogexporterGCSPath string
|
||||
// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
|
||||
GarbageCollectorEnabled bool
|
||||
// featureGates is a map of feature names to bools that enable or disable alpha/experimental features.
|
||||
FeatureGates map[string]bool
|
||||
// Node e2e specific test context
|
||||
NodeTestContextType
|
||||
// Storage e2e specific test context
|
||||
StorageTestContextType
|
||||
// Monitoring solution that is used in current cluster.
|
||||
ClusterMonitoringMode string
|
||||
// Separate Prometheus monitoring deployed in cluster
|
||||
EnablePrometheusMonitoring bool
|
||||
|
||||
// Indicates what path the kubernetes-anywhere is installed on
|
||||
KubernetesAnywherePath string
|
||||
@ -155,12 +162,21 @@ type NodeTestContextType struct {
|
||||
SystemSpecName string
|
||||
}
|
||||
|
||||
// StorageTestContextType contains the shared settings for storage e2e tests.
|
||||
type StorageTestContextType struct {
|
||||
// CSIImageVersion overrides the builtin stable version numbers if set.
|
||||
CSIImageVersion string
|
||||
// CSIImageRegistry defines the image registry hosting the CSI container images.
|
||||
CSIImageRegistry string
|
||||
}
|
||||
|
||||
type CloudConfig struct {
|
||||
ApiEndpoint string
|
||||
ProjectID string
|
||||
Zone string // for multizone tests, arbitrarily chosen zone
|
||||
Region string
|
||||
MultiZone bool
|
||||
MultiMaster bool
|
||||
Cluster string
|
||||
MasterName string
|
||||
NodeInstanceGroup string // comma-delimited list of groups' names
|
||||
@ -208,7 +224,7 @@ func RegisterCommonFlags() {
|
||||
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
|
||||
flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
|
||||
flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
|
||||
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/rkt/remote).")
|
||||
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
|
||||
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
|
||||
flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
|
||||
flag.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
|
||||
@ -230,12 +246,14 @@ func RegisterClusterFlags() {
|
||||
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
|
||||
flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
|
||||
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, etc.)")
|
||||
flag.StringVar(&TestContext.Tooling, "tooling", "", "The tooling in use (kops, gke, etc.)")
|
||||
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
|
||||
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
|
||||
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
|
||||
flag.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, trusty, or coreos).")
|
||||
flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).")
|
||||
flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "influxdb", "The monitoring solution that is used in the cluster.")
|
||||
flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "standalone", "The monitoring solution that is used in the cluster.")
|
||||
flag.BoolVar(&TestContext.EnablePrometheusMonitoring, "prometheus-monitoring", false, "Separate Prometheus monitoring deployed in cluster.")
|
||||
|
||||
// TODO: Flags per provider? Rename gce-project/gce-zone?
|
||||
cloudConfig := &TestContext.CloudConfig
|
||||
@ -245,6 +263,7 @@ func RegisterClusterFlags() {
|
||||
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
|
||||
flag.BoolVar(&cloudConfig.MultiZone, "gce-multizone", false, "If true, start GCE cloud provider with multizone support.")
|
||||
flag.BoolVar(&cloudConfig.MultiMaster, "gce-multimaster", false, "If true, the underlying GCE/GKE cluster is assumed to be multi-master.")
|
||||
flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
|
||||
flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
|
||||
flag.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")
|
||||
@ -257,7 +276,7 @@ func RegisterClusterFlags() {
|
||||
flag.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure.")
|
||||
flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
|
||||
flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
|
||||
flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 4*time.Hour, "Timeout for waiting for all nodes to be schedulable.")
|
||||
flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
|
||||
flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
|
||||
flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
|
||||
flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
|
||||
@ -265,7 +284,6 @@ func RegisterClusterFlags() {
|
||||
flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
|
||||
flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
|
||||
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
|
||||
flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
|
||||
}
|
||||
|
||||
// Register flags specific to the node e2e test suite.
|
||||
@ -275,7 +293,7 @@ func RegisterNodeFlags() {
|
||||
flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on.")
|
||||
// TODO(random-liu): Move kubelet start logic out of the test.
|
||||
// TODO(random-liu): Move log fetch logic out of the test.
|
||||
// There are different ways to start kubelet (systemd, initd, docker, rkt, manually started etc.)
|
||||
// There are different ways to start kubelet (systemd, initd, docker, manually started etc.)
|
||||
// and manage logs (journald, upstart etc.).
|
||||
// For different situation we need to mount different things into the container, run different commands.
|
||||
// It is hard and unnecessary to deal with the complexity inside the test suite.
|
||||
@ -285,6 +303,11 @@ func RegisterNodeFlags() {
|
||||
flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.")
|
||||
}
|
||||
|
||||
func RegisterStorageFlags() {
|
||||
flag.StringVar(&TestContext.CSIImageVersion, "csiImageVersion", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
|
||||
flag.StringVar(&TestContext.CSIImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
|
||||
}
|
||||
|
||||
// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
|
||||
func ViperizeFlags() {
|
||||
|
||||
@ -293,6 +316,7 @@ func ViperizeFlags() {
|
||||
// since go test 'flag's are sort of incompatible w/ flag, glog, etc.
|
||||
RegisterCommonFlags()
|
||||
RegisterClusterFlags()
|
||||
RegisterStorageFlags()
|
||||
flag.Parse()
|
||||
|
||||
// Part 2: Set Viper provided flags.
|
||||
@ -363,4 +387,8 @@ func AfterReadingAllFlags(t *TestContextType) {
|
||||
t.Host = defaultHost
|
||||
}
|
||||
}
|
||||
// Allow 1% of nodes to be unready (statistically) - relevant for large clusters.
|
||||
if t.AllowedNotReadyNodes == 0 {
|
||||
t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
|
||||
}
|
||||
}
|
||||
|
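test_context.go above adds a storage-specific test context and a RegisterStorageFlags hook for the CSI image overrides. A minimal sketch of equivalent flag wiring using only the standard flag package; the local struct is a stand-in rather than the framework's real TestContextType:

package main

import (
	"flag"
	"fmt"
)

type storageTestContext struct {
	CSIImageVersion  string
	CSIImageRegistry string
}

func main() {
	var ctx storageTestContext
	// Same flag names as RegisterStorageFlags; defaults mirror the diff above.
	flag.StringVar(&ctx.CSIImageVersion, "csiImageVersion", "", "overrides the default tag for the CSI sidecar images")
	flag.StringVar(&ctx.CSIImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default repository for the CSI sidecar images")
	flag.Parse()
	fmt.Printf("%+v\n", ctx)
}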
477
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
File diff suppressed because it is too large
33
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
@ -67,6 +67,10 @@ const (
|
||||
|
||||
// Waiting period for volume server (Ceph, ...) to initialize itself.
|
||||
VolumeServerPodStartupSleep = 20 * time.Second
|
||||
|
||||
// Waiting period for pod to be cleaned up and unmount its volumes so we
|
||||
// don't tear down containers with NFS/Ceph/Gluster server too early.
|
||||
PodCleanupTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
// Configuration of one test. The test consists of:
|
||||
@ -182,7 +186,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
|
||||
}
|
||||
|
||||
// CephRBD-specific wrapper for CreateStorageServer.
|
||||
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
|
||||
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
|
||||
config = VolumeTestConfig{
|
||||
Namespace: namespace,
|
||||
Prefix: "rbd",
|
||||
@ -201,7 +205,28 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
|
||||
Logf("sleeping a bit to give ceph server time to initialize")
|
||||
time.Sleep(VolumeServerPodStartupSleep)
|
||||
|
||||
return config, pod, ip
|
||||
// create secrets for the server
|
||||
secret = &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.Prefix + "-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
// from test/images/volumes-tester/rbd/keyring
|
||||
"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
|
||||
},
|
||||
Type: "kubernetes.io/rbd",
|
||||
}
|
||||
|
||||
secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
|
||||
if err != nil {
|
||||
Failf("Failed to create secrets for Ceph RBD: %v", err)
|
||||
}
|
||||
|
||||
return config, pod, secret, ip
|
||||
}
|
||||
|
||||
// Wrapper for StartVolumeServer(). A storage server config is passed in, and a pod pointer
|
||||
@ -351,8 +376,8 @@ func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
|
||||
}
|
||||
// See issue #24100.
|
||||
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
|
||||
By("sleeping a bit so client can stop and unmount")
|
||||
time.Sleep(20 * time.Second)
|
||||
By("sleeping a bit so kubelet can unmount and detach the volume")
|
||||
time.Sleep(PodCleanupTimeout)
|
||||
|
||||
err = podClient.Delete(config.Prefix+"-server", nil)
|
||||
if err != nil {
|
||||
|
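The volume_util.go changes above make NewRBDServer create a kubernetes.io/rbd Secret whose key entry holds the base64 Ceph keyring value quoted in the hunk. A small, tangential stdlib sketch that sanity-checks such a key decodes as valid base64:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Test keyring value quoted in the hunk above.
	key := "AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="
	raw, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		fmt.Println("invalid keyring key:", err)
		return
	}
	fmt.Printf("decoded %d bytes of Ceph key material\n", len(raw))
}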
21
vendor/k8s.io/kubernetes/test/e2e/generated/BUILD
generated
vendored
@ -4,6 +4,7 @@ load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("//build:bindata.bzl", "go_bindata")

go_library(
name = "go_default_library",
@ -18,25 +19,17 @@ go_library(
],
)

genrule(
# IMPORTANT: if you make any changes here, you must also update hack/generate-bindata.sh.
go_bindata(
name = "bindata",
srcs = [
"//examples:all-srcs",
"//test/images:all-srcs",
"//test/fixtures:all-srcs",
"//test/e2e/testing-manifests:all-srcs",
"//test/fixtures:all-srcs",
"//test/images:all-srcs",
],
outs = ["bindata.go"],
cmd = """
$(location //vendor/github.com/jteeuwen/go-bindata/go-bindata:go-bindata) \
-nometadata -o "$(OUTS)" -pkg generated \
-prefix $$(pwd) \
-ignore .jpg -ignore .png -ignore .md \
$(SRCS)
""",
tools = [
"//vendor/github.com/jteeuwen/go-bindata/go-bindata",
],
compress = True,
include_metadata = False,
)

filegroup(

2
vendor/k8s.io/kubernetes/test/e2e/generated/gobindata_util.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/generated/gobindata_util.go
generated
vendored
@ -16,8 +16,6 @@ limitations under the License.

package generated

//go:generate ../../../hack/generate-bindata.sh

import "github.com/golang/glog"

/*

3
vendor/k8s.io/kubernetes/test/e2e/gke_local_ssd.go
generated
vendored
3
vendor/k8s.io/kubernetes/test/e2e/gke_local_ssd.go
generated
vendored
@ -23,7 +23,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@ -74,7 +73,7 @@ func testPodWithSsd(command string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,

2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/OWNERS
generated
vendored
@ -1,9 +1,9 @@
approvers:
- crassirostris
- DirectXMan12
- fabxc
- fgrzadkowski
- piosz
- x13n
- kawych
reviewers:
- sig-instrumentation-pr-reviews

4
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/BUILD
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/BUILD
generated
vendored
@ -16,7 +16,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
"//test/e2e/instrumentation/logging/stackdrvier:go_default_library",
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
@ -35,7 +35,7 @@ filegroup(
srcs = [
":package-srcs",
"//test/e2e/instrumentation/logging/elasticsearch:all-srcs",
"//test/e2e/instrumentation/logging/stackdrvier:all-srcs",
"//test/e2e/instrumentation/logging/stackdriver:all-srcs",
"//test/e2e/instrumentation/logging/utils:all-srcs",
],
tags = ["automanaged"],

2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/OWNERS
generated
vendored
@ -1,8 +1,6 @@
reviewers:
- coffeepac
- crassirostris
- piosz
approvers:
- coffeepac
- crassirostris
- piosz

2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/imports.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/imports.go
generated
vendored
@ -18,5 +18,5 @@ package logging

import (
_ "k8s.io/kubernetes/test/e2e/instrumentation/logging/elasticsearch"
_ "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier"
_ "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver"
)

@ -12,13 +12,12 @@ go_library(
"soak.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdrvier",
importpath = "k8s.io/kubernetes/test/e2e/instrumentation/logging/stackdriver",
deps = [
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/logging/v2beta1:go_default_library",
"//vendor/google.golang.org/api/pubsub/v1:go_default_library",
@ -162,7 +162,11 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
}()

ginkgo.By("Waiting for events to ingest")
c := utils.NewLogChecker(p, utils.UntilFirstEntry, utils.JustTimeout, "")
location := framework.TestContext.CloudConfig.Zone
if framework.TestContext.CloudConfig.MultiMaster {
location = framework.TestContext.CloudConfig.Region
}
c := utils.NewLogChecker(p, utils.UntilFirstEntryFromLocation(location), utils.JustTimeout, "")
err := utils.WaitForLogs(c, ingestionInterval, ingestionTimeout)
framework.ExpectNoError(err)
})
@ -49,7 +49,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
maxPodCount := 10
jobDuration := 30 * time.Minute
linesPerPodPerSecond := 100
// TODO(crassirostris): Increase to 21 hrs
// TODO(instrumentation): Increase to 21 hrs
testDuration := 3 * time.Hour
ingestionInterval := 1 * time.Minute
ingestionTimeout := testDuration + 30*time.Minute
@ -17,6 +17,7 @@ limitations under the License.
package stackdriver

import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
@ -27,7 +28,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"

"golang.org/x/net/context"
"golang.org/x/oauth2/google"
sd "google.golang.org/api/logging/v2beta1"
pubsub "google.golang.org/api/pubsub/v1"
@ -49,6 +49,9 @@ const (

// The parallelism level of polling logs process.
sdLoggingPollParallelism = 10

// The limit on the number of stackdriver sinks that can be created within one project.
stackdriverSinkCountLimit = 90
)

type logProviderScope int
@ -86,6 +89,10 @@ func newSdLogProvider(f *framework.Framework, scope logProviderScope) (*sdLogPro
if err != nil {
return nil, err
}
err = ensureProjectHasSinkCapacity(sdService.Projects.Sinks, framework.TestContext.CloudConfig.ProjectID)
if err != nil {
return nil, err
}

pubsubService, err := pubsub.New(hc)
if err != nil {
@ -104,6 +111,36 @@ func newSdLogProvider(f *framework.Framework, scope logProviderScope) (*sdLogPro
return provider, nil
}

func ensureProjectHasSinkCapacity(sinksService *sd.ProjectsSinksService, projectID string) error {
listResponse, err := listSinks(sinksService, projectID)
if err != nil {
return err
}
if len(listResponse.Sinks) >= stackdriverSinkCountLimit {
framework.Logf("Reached Stackdriver sink limit. Deleting all sinks")
deleteSinks(sinksService, projectID, listResponse.Sinks)
}
return nil
}

func listSinks(sinksService *sd.ProjectsSinksService, projectID string) (*sd.ListSinksResponse, error) {
projectDst := fmt.Sprintf("projects/%s", projectID)
listResponse, err := sinksService.List(projectDst).PageSize(stackdriverSinkCountLimit).Do()
if err != nil {
return nil, fmt.Errorf("failed to list Stackdriver Logging sinks: %v", err)
}
return listResponse, nil
}

func deleteSinks(sinksService *sd.ProjectsSinksService, projectID string, sinks []*sd.LogSink) {
for _, sink := range sinks {
sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)
if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
framework.Logf("Failed to delete LogSink: %v", err)
}
}
}

func (p *sdLogProvider) Init() error {
projectID := framework.TestContext.CloudConfig.ProjectID
nsName := p.framework.Namespace.Name
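
The new sink-capacity guard exists because a GCP project only allows a limited number of Stackdriver Logging sinks, so the provider wipes them all once the limit is hit. A read-only variant of the same check, sketched under the assumption that the sinks service comes from an already authenticated sd.Service as in newSdLogProvider; the function name and the 90 limit mirror the constants above and are otherwise illustrative:

package example

import (
	"fmt"

	sd "google.golang.org/api/logging/v2beta1"
)

// sinkCountLimit mirrors stackdriverSinkCountLimit from the diff above.
const sinkCountLimit = 90

// remainingSinkCapacity reports how much sink quota is left in the project,
// without deleting anything.
func remainingSinkCapacity(sinksService *sd.ProjectsSinksService, projectID string) (int, error) {
	resp, err := sinksService.List(fmt.Sprintf("projects/%s", projectID)).PageSize(sinkCountLimit).Do()
	if err != nil {
		return 0, fmt.Errorf("failed to list Stackdriver Logging sinks: %v", err)
	}
	return sinkCountLimit - len(resp.Sinks), nil
}
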
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/misc.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/misc.go
generated
vendored
@ -26,7 +26,7 @@ func GetNodeIds(cs clientset.Interface) []string {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
nodeIds := []string{}
for _, n := range nodes.Items {
nodeIds = append(nodeIds, n.Spec.ExternalID)
nodeIds = append(nodeIds, n.Name)
}
return nodeIds
}

16
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/wait.go
generated
vendored
16
vendor/k8s.io/kubernetes/test/e2e/instrumentation/logging/utils/wait.go
generated
vendored
@ -48,9 +48,19 @@ func UntilFirstEntryFromLog(log string) IngestionPred {
return func(_ string, entries []LogEntry) (bool, error) {
for _, e := range entries {
if e.LogName == log {
if e.Location != framework.TestContext.CloudConfig.Zone {
return false, fmt.Errorf("Bad location in logs '%s' != '%d'", e.Location, framework.TestContext.CloudConfig.Zone)
}
return true, nil
}
}
return false, nil
}
}

// UntilFirstEntryFromLocation is an IngestionPred that checks that at least one
// entry from the given location was ingested.
func UntilFirstEntryFromLocation(location string) IngestionPred {
return func(_ string, entries []LogEntry) (bool, error) {
for _, e := range entries {
if e.Location == location {
return true, nil
}
}
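
UntilFirstEntryFromLocation plugs into the same checker/wait loop shown in the basic test hunk above. A small sketch of the call site, assuming LogProvider is the provider interface that NewLogChecker accepts; the 10-second poll interval is a placeholder:

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
)

// waitForEntryFromCurrentLocation waits until at least one log entry tagged
// with the cluster's location (zone, or region on multi-master) is ingested.
func waitForEntryFromCurrentLocation(p utils.LogProvider, timeout time.Duration) error {
	location := framework.TestContext.CloudConfig.Zone
	if framework.TestContext.CloudConfig.MultiMaster {
		location = framework.TestContext.CloudConfig.Region
	}
	checker := utils.NewLogChecker(p, utils.UntilFirstEntryFromLocation(location), utils.JustTimeout, "")
	return utils.WaitForLogs(checker, 10*time.Second, timeout)
}
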
3
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/test/e2e/instrumentation/monitoring/BUILD
generated
vendored
@ -14,6 +14,7 @@ go_library(
"custom_metrics_stackdriver.go",
"influxdb.go",
"metrics_grabber.go",
"prometheus.go",
"stackdriver.go",
"stackdriver_metadata_agent.go",
],
@ -28,6 +29,7 @@ go_library(
"//vendor/github.com/influxdata/influxdb/client/v2:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
@ -42,6 +44,7 @@ go_library(
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//vendor/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
],
)

@ -18,6 +18,7 @@ package monitoring

import (
"fmt"
"strings"

gcm "google.golang.org/api/monitoring/v3"
corev1 "k8s.io/api/core/v1"
@ -25,6 +26,7 @@ import (
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"os/exec"
)

var (
@ -52,6 +54,11 @@ var (
},
},
}
StagingDeploymentsLocation = "https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-stackdriver/master/custom-metrics-stackdriver-adapter/deploy/staging/"
AdapterForOldResourceModel = "adapter_old_resource_model.yaml"
AdapterForNewResourceModel = "adapter_new_resource_model.yaml"
AdapterDefault = AdapterForOldResourceModel
ClusterAdminBinding = "e2e-test-cluster-admin-binding"
)

// CustomMetricContainerSpec allows to specify a config for StackdriverExporterDeployment
@ -82,7 +89,7 @@ func SimpleStackdriverExporterDeployment(name, namespace string, replicas int32,
func StackdriverExporterDeployment(name, namespace string, replicas int32, containers []CustomMetricContainerSpec) *extensions.Deployment {
podSpec := corev1.PodSpec{Containers: []corev1.Container{}}
for _, containerSpec := range containers {
podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, containerSpec.MetricName, containerSpec.MetricValue))
podSpec.Containers = append(podSpec.Containers, stackdriverExporterContainerSpec(containerSpec.Name, namespace, containerSpec.MetricName, containerSpec.MetricValue))
}

return &extensions.Deployment{
@ -119,17 +126,30 @@ func StackdriverExporterPod(podName, namespace, podLabel, metricName string, met
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, metricName, metricValue)},
Containers: []corev1.Container{stackdriverExporterContainerSpec(StackdriverExporter, namespace, metricName, metricValue)},
},
}
}

func stackdriverExporterContainerSpec(name string, metricName string, metricValue int64) corev1.Container {
func stackdriverExporterContainerSpec(name string, namespace string, metricName string, metricValue int64) corev1.Container {
return corev1.Container{
Name: name,
Image: "k8s.gcr.io/sd-dummy-exporter:v0.1.0",
Image: "k8s.gcr.io/sd-dummy-exporter:v0.2.0",
ImagePullPolicy: corev1.PullPolicy("Always"),
Command: []string{"/sd_dummy_exporter", "--pod-id=$(POD_ID)", "--metric-name=" + metricName, fmt.Sprintf("--metric-value=%v", metricValue)},
Command: []string{
"/bin/sh",
"-c",
strings.Join([]string{
"./sd_dummy_exporter",
"--pod-id=$(POD_ID)",
"--pod-name=$(POD_NAME)",
"--namespace=" + namespace,
"--metric-name=" + metricName,
fmt.Sprintf("--metric-value=%v", metricValue),
"--use-old-resource-model",
"--use-new-resource-model",
}, " "),
},
Env: []corev1.EnvVar{
{
Name: "POD_ID",
@ -139,6 +159,14 @@ func stackdriverExporterContainerSpec(name string, metricName string, metricValu
},
},
},
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
FieldPath: "metadata.name",
},
},
},
},
Ports: []corev1.ContainerPort{{ContainerPort: 80}},
}
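
With the namespace threaded through, the dummy exporter now reports pod name and namespace alongside the metric, which the new Stackdriver resource model needs. A short sketch of standing up a single exporter pod with the updated helper, assuming it runs in the same monitoring package; the pod, label and metric names are placeholders:

package monitoring

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// deployDummyExporter is an illustrative helper, not part of this diff: it
// creates one exporter pod that publishes a constant-valued custom metric
// through the updated stackdriverExporterContainerSpec.
func deployDummyExporter(f *framework.Framework) error {
	pod := StackdriverExporterPod("sd-exporter-example", f.Namespace.Name, "sd-exporter-example", "example-metric", 42)
	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
	return err
}
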
@ -210,9 +238,35 @@ func prometheusExporterPodSpec(metricName string, metricValue int64, port int32)
}
}

// CreateAdapter creates Custom Metrics - Stackdriver adapter.
func CreateAdapter() error {
stat, err := framework.RunKubectl("create", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-stackdriver/master/custom-metrics-stackdriver-adapter/adapter-beta.yaml")
// CreateAdapter creates Custom Metrics - Stackdriver adapter
// adapterDeploymentFile should be a filename for adapter deployment located in StagingDeploymentsLocation
func CreateAdapter(adapterDeploymentFile string) error {
// A workaround to make this work on GKE. GKE doesn't normally allow to create cluster roles,
// which the adapter deployment does. The solution is to create cluster role binding for
// cluster-admin role and currently used service account.
err := createClusterAdminBinding()
if err != nil {
return err
}
adapterURL := StagingDeploymentsLocation + adapterDeploymentFile
err = exec.Command("wget", adapterURL).Run()
if err != nil {
return err
}
stat, err := framework.RunKubectl("create", "-f", adapterURL)
framework.Logf(stat)
return err
}

func createClusterAdminBinding() error {
stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account")
if err != nil {
framework.Logf(stderr)
return err
}
serviceAccount := strings.TrimSpace(stdout)
framework.Logf("current service account: %q", serviceAccount)
stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
framework.Logf(stat)
return err
}
@ -251,8 +305,23 @@ func CleanupDescriptors(service *gcm.Service, projectId string) {
}

// CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments.
func CleanupAdapter() error {
stat, err := framework.RunKubectl("delete", "-f", "https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-stackdriver/master/custom-metrics-stackdriver-adapter/adapter-beta.yaml")
func CleanupAdapter(adapterDeploymentFile string) {
stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile)
framework.Logf(stat)
return err
if err != nil {
framework.Logf("Failed to delete adapter deployments: %s", err)
}
err = exec.Command("rm", adapterDeploymentFile).Run()
if err != nil {
framework.Logf("Failed to delete adapter deployment file: %s", err)
}
cleanupClusterAdminBinding()
}

func cleanupClusterAdminBinding() {
stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding)
framework.Logf(stat)
if err != nil {
framework.Logf("Failed to delete cluster admin binding: %s", err)
}
}
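
The adapter helpers are meant to be used as a pair: CreateAdapter sets up the cluster-admin binding and deploys the chosen manifest, and CleanupAdapter tears everything down again. A minimal sketch of that pairing, assuming it sits in the same monitoring package; picking the new-resource-model manifest here is illustrative:

package monitoring

import "k8s.io/kubernetes/test/e2e/framework"

// withCustomMetricsAdapter is an illustrative wrapper, not part of this diff:
// it deploys the Stackdriver adapter for the duration of test(), then removes
// the adapter, the downloaded manifest and the cluster-admin binding.
func withCustomMetricsAdapter(test func()) {
	// AdapterForNewResourceModel is one of the manifests defined above;
	// AdapterDefault would deploy the old resource model instead.
	if err := CreateAdapter(AdapterForNewResourceModel); err != nil {
		framework.Failf("Failed to set up the Custom Metrics adapter: %v", err)
	}
	defer CleanupAdapter(AdapterForNewResourceModel)
	test()
}
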
Some files were not shown because too many files have changed in this diff.