Fresh dep ensure
vendor/k8s.io/kubernetes/test/BUILD (generated, vendored): 1 change
@@ -11,6 +11,7 @@ filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/cmd:all-srcs",
        "//test/conformance:all-srcs",
        "//test/e2e:all-srcs",
        "//test/e2e_kubeadm:all-srcs",
vendor/k8s.io/kubernetes/test/OWNERS (generated, vendored): 4 changes
@@ -26,6 +26,7 @@ reviewers:
- timothysc
- zmerlynn
- vishh
- MaciekPytel # for test/e2e/common/autoscaling_utils.go
approvers:
- bowei # for test/e2e/{dns*,network}.go
- cblecker
@@ -56,3 +57,6 @@ approvers:
- timothysc
- zmerlynn
- vishh
- MaciekPytel # for test/e2e/common/autoscaling_utils.go
labels:
- sig/testing
vendor/k8s.io/kubernetes/test/cmd/BUILD (generated, vendored, new file): 20 lines
@@ -0,0 +1,20 @@
sh_library(
    name = "legacy-script",
    srcs = glob(["*.sh"]),
    data = ["//pkg/kubectl/validation:testdata/v1/validPod.yaml"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
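The "legacy-script" target above only bundles the *.sh files; the scripts themselves define run_*_tests functions and are meant to be sourced by a test runner that supplies helpers such as kube::test::get_object_assert and the kube_flags array. As a purely hypothetical sketch (the runner name and call order here are illustrative, not taken from this commit):

#!/usr/bin/env bash
# Hypothetical runner: source the suites, then invoke the functions they define.
set -o errexit -o nounset -o pipefail
source test/cmd/apply.sh
source test/cmd/apps.sh
run_kubectl_apply_tests   # defined in apply.sh below
run_deployment_tests      # defined in apps.sh below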
vendor/k8s.io/kubernetes/test/cmd/OWNERS (generated, vendored, new file): 4 lines
@@ -0,0 +1,4 @@
approvers:
- sig-cli-maintainers
reviewers:
- sig-cli
vendor/k8s.io/kubernetes/test/cmd/apply.sh (generated, vendored, new executable file): 229 lines
@@ -0,0 +1,229 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply"
  ## kubectl apply should create the resource that doesn't exist yet
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: apply a pod "test-pod" (doesn't exist) should create this pod
  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
  # Post-Condition: pod "test-pod" has configuration annotation
  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
  # Clean up
  kubectl delete pods test-pod "${kube_flags[@]}"


  ## kubectl apply should be able to clear defaulted fields.
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
  kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
  # Post-Condition: deployment "test-deployment-retainkeys" created
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
  # Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
  # Command: apply a deployment "test-deployment-retainkeys" should clear
  # defaulted fields and successfully update the deployment
  [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
  # Post-Condition: deployment "test-deployment-retainkeys" has updated fields
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
  ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
  ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
  # Clean up
  kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"


  ## kubectl apply -f with label selector should only apply matching objects
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply
  kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
  # check right pod exists
  kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
  # check wrong pod doesn't exist
  output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
  # cleanup
  kubectl delete pods selector-test-pod

  ## kubectl apply --server-dry-run
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply dry-run
  kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # No pod exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply without dry-run creates the pod
  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
  # apply changes
  kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
  # Post-Condition: label still has initial value
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'

  # clean-up
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"

  ## kubectl apply dry-run on CR
  # Create CRD
  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1beta1",
  "metadata": {
    "name": "resources.mygroup.example.com"
  },
  "spec": {
    "group": "mygroup.example.com",
    "version": "v1alpha1",
    "scope": "Namespaced",
    "names": {
      "plural": "resources",
      "singular": "resource",
      "kind": "Kind",
      "listKind": "KindList"
    }
  }
}
__EOF__

  # Dry-run create the CR
  kubectl "${kube_flags[@]}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
  # Make sure that the CR doesn't exist
  ! kubectl "${kube_flags[@]}" get resource/myobj

  # clean-up
  kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com

  ## kubectl apply --prune
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply a
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
  # check right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # check wrong pod doesn't exist
  output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'pods "b" not found'

  # apply b
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
  # check right pod exists
  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
  # check wrong pod doesn't exist
  output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'pods "a" not found'

  # cleanup
  kubectl delete pods b

  # same thing without prune for a sanity check
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # apply a
  kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
  # check right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # check wrong pod doesn't exist
  output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'pods "b" not found'

  # apply b
  kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
  # check both pods exist
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
  # check wrong pod doesn't exist

  # cleanup
  kubectl delete pod/a pod/b

  ## kubectl apply --prune requires a --all flag to select everything
  output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" \
    'all resources selected for prune without explicitly passing --all'
  # should apply everything
  kubectl apply --all --prune -f hack/testdata/prune
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
  kubectl delete pod/a pod/b

  ## kubectl apply --prune should fall back to delete for non-reapable types
  kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
  kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"

  ## kubectl apply --prune --prune-whitelist
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply pod a
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
  # check right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # apply svc and don't prune pod a by overwriting whitelist
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
  # apply svc and prune pod a with default whitelist
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
  kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # cleanup
  kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"


  ## kubectl apply -f some.yml --force
  # Pre-condition: no service exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply service a
  kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]}"
  # check right service exists
  kube::test::get_object_assert 'services a' "{{${id_field}}}" 'a'
  # change immutable field and apply service a
  output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'field is immutable'
  # apply --force to recreate resources for immutable fields
  kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]}"
  # check immutable field exists
  kube::test::get_object_assert 'services a' "{{.spec.clusterIP}}" '10.0.0.12'
  # cleanup
  kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]}"


  set +o nounset
  set +o errexit
}
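The suite above leans on harness helpers (kube::test::get_object_assert, the kube_flags array) that live outside this file. As a rough standalone sketch of the same precondition / apply / postcondition pattern, assuming only bash, kubectl, and a hypothetical pod.yaml whose pod is named test-pod and labeled name=test-pod-label (note that newer kubectl releases spell the server-side dry run as --dry-run=server rather than --server-dry-run):

#!/usr/bin/env bash
# Minimal sketch of the assert -> apply -> assert flow, without the kube::test helpers.
set -o errexit -o nounset -o pipefail

# Pre-condition: no pods exist yet.
[[ -z "$(kubectl get pods -o jsonpath='{.items[*].metadata.name}')" ]]

# Apply the manifest for real (a server-side dry run would validate it without persisting).
kubectl apply -f pod.yaml

# Post-condition: the pod carries the expected label and the
# last-applied-configuration annotation that future applies diff against.
[[ "$(kubectl get pod test-pod -o jsonpath='{.metadata.labels.name}')" == "test-pod-label" ]]
kubectl get pod test-pod -o yaml | grep -q kubectl.kubernetes.io/last-applied-configuration

# Clean up.
kubectl delete pod test-pod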
vendor/k8s.io/kubernetes/test/cmd/apps.sh (generated, vendored, new executable file): 657 lines
@@ -0,0 +1,657 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_daemonset_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets)"

  ### Create a rolling update DaemonSet
  # Pre-condition: no DaemonSet exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
  # Template Generation should be 1
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
  # Template Generation should stay 1
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
  # Test set commands
  kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
  kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
  kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'

  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
run_daemonset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"

  ### Test rolling back a DaemonSet
  # Pre-condition: no DaemonSet or its pods exist
  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a DaemonSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Update the DaemonSet (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to revision 1000000 - should fail
  output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo daemonset "${kube_flags[@]}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
run_kubectl_apply_deployments_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply deployments"
  ## kubectl apply should propagate user defined null values
  # Pre-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply base deployment
  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
  # check right deployment exists
  kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
  # check the right labels exist
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'

  # apply new deployment with new template labels
  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
  # check the right labels exist
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'

  # cleanup
  # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
  kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
  # Post-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  # kubectl apply deployment --overwrite=true --force=true
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply deployment nginx
  kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
  # check right deployment exists
  kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
  # apply deployment with new labels and a conflicting resourceVersion
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
  # apply deployment with --force and --overwrite will succeed
  kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
  # check the changed deployment
  output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
  kube::test::if_has_string "${output_message}" '"name": "nginx2"'
  # applying a resource (with --force) that is both conflicting and invalid will
  # cause the server to only return a "Conflict" error when we attempt to patch.
  # This means that we will delete the existing resource after receiving 5 conflict
  # errors in a row from the server, and will attempt to create the modified
  # resource that we are passing to "apply". Since the modified resource is also
  # invalid, we will receive an invalid error when we attempt to create it, after
  # having deleted the old resource. Ensure that when this case is reached, the
  # old resource is restored once again, and the validation error is printed.
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'Invalid value'
  # Ensure that the old object has been restored
  kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
  # cleanup
  kubectl delete deployments --all --grace-period=10

  set +o nounset
  set +o errexit
}
run_deployment_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing deployments"
  # Test kubectl create deployment (using default - old generator)
  kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
  # and old generator was used, i.e. old defaults are applied
  output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string "${output_message}" '2'
  # Ensure we can interact with deployments through extensions and apps endpoints
  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Clean up
  kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"

  # Test kubectl create deployment
  kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
  # and new generator was used, i.e. new defaults are applied
  output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string "${output_message}" '2'
  # Ensure we can interact with deployments through extensions and apps endpoints
  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
  # Clean up
  kubectl delete deployment test-nginx-apps "${kube_flags[@]}"

  ### Test kubectl create deployment with image and command
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
  # Post-Condition: Deployment "nginx" is created.
  kube::test::get_object_assert 'deploy nginx-with-command' "{{$container_name_field}}" 'nginx'
  # Clean up
  kubectl delete deployment nginx-with-command "${kube_flags[@]}"

  ### Test kubectl create deployment should not fail validation
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
  # Post-Condition: Deployment "deployment-with-unixuserid" is created.
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
  # Clean up
  kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"

  ### Test cascading deletion
  ## Test that rs is deleted when deployment is deleted.
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
  # Deleting the deployment should delete the rs.
  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ## Test that rs is not deleted when deployment is deleted with cascade set to false.
  # Pre-condition: no deployment and rs exist
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create deployment
  kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
  # Delete the deployment with cascade set to false.
  kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
  # Wait for the deployment to be deleted and then verify that rs is not
  # deleted.
  kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
  # Cleanup
  # Find the name of the rs to be deleted.
  output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}})
  kubectl delete rs ${output_message} "${kube_flags[@]}"

  ### Auto scale deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  # autoscale 2~3 pods, no CPU utilization specified
  kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
  kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
  # Clean up
  # Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
  kubectl delete hpa nginx-deployment "${kube_flags[@]}"
  kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"

  ### Rollback a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a deployment (revision 1)
  kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Update the deployment (revision 2)
  kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1000000 - should be no-op
  ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to last revision
  kubectl rollout undo deployment nginx "${kube_flags[@]}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Pause the deployment
  kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
  # A paused deployment cannot be rolled back
  ! kubectl rollout undo deployment nginx "${kube_flags[@]}"
  # Resume the deployment
  kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
  # The resumed deployment can now be rolled back
  kubectl rollout undo deployment nginx "${kube_flags[@]}"
  # Check that the new replica set has all old revisions stored in an annotation
  newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
  # Check that trying to watch the status of a superseded revision returns an error
  ! kubectl rollout status deployment/nginx --revision=3
  cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
  # Deletion of both deployments should not be blocked
  kubectl delete deployment nginx2 "${kube_flags[@]}"
  # Clean up
  kubectl delete deployment nginx "${kube_flags[@]}"

  ### Set image of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Setting the image of a non-existent container should fail
  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
  # Set image of deployments without specifying name
  kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a deployment specified by file
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a local file without talking to the server
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of all containers of the deployment
  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Set image of all containers of the deployment again when the image does not change
  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]}"

  ### Set env of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
  # configmaps are special here because the controller automatically creates kube-root-ca.crt in each namespace
  kube::test::get_object_assert 'configmaps/test-set-env-config' "{{$id_field}}" 'test-set-env-config'
  kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
  # Set env of deployments by configmap from keys
  kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}"
  # Assert correct value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
  # Assert single value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
  # Set env of deployments by configmap
  kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
  # Assert all values in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
  # Set env of deployments for all containers
  kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
  # Set env of deployments for a specific container
  kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}"
  # Set env of deployments by secret from keys
  kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}"
  # Set env of deployments by secret
  kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
  # Remove specific env of deployment
  kubectl set env deployment nginx-deployment env-
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
  kubectl delete configmap test-set-env-config "${kube_flags[@]}"
  kubectl delete secret test-set-env-secret "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
run_statefulset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"

  ### Test rolling back a StatefulSet
  # Pre-condition: no statefulset or its pods exist
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  # Create a StatefulSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Update the statefulset (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to revision 1000000 - should fail
  output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo statefulset "${kube_flags[@]}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
  # Clean up - delete newest configuration
  kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}
run_stateful_set_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets)"

  ### Create and stop statefulset, make sure it doesn't leak pods
  # Pre-condition: no statefulset exists
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create statefulset
  kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"

  ### Scale statefulset test with current-replicas and replicas
  # Pre-condition: 0 replicas
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
  # Command: Scale up
  kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
  # Post-condition: 1 replica, named nginx-0
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
  # Typically we'd wait and confirm that N>1 replicas are up, but this framework
  # doesn't start the scheduler, so pet-0 will block all others.
  # TODO: test robust scaling in an e2e.
  wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"

  ### Clean up
  kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit

}
run_rs_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:replicasets)"

  ### Create and stop a replica set, make sure it doesn't leak pods
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  kube::log::status "Deleting rs"
  kubectl delete rs frontend "${kube_flags[@]}"
  # Post-condition: no pods from frontend replica set
  kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  #TODO(mortent): Remove this workaround when ReplicaSet bug described in issue #69376 is fixed
  local replicaset_name="frontend-no-cascade"
  sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]}" -f -
  # wait for all 3 pods to be set up
  kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
  kube::log::status "Deleting rs"
  kubectl delete rs "${replicaset_name}" "${kube_flags[@]}" --cascade=false
  # Wait for the rs to be deleted.
  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Post-condition: All 3 pods still remain from frontend replica set
  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
  # Cleanup
  kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create replica set frontend from YAML
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  # Post-condition: frontend replica set is created
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Describe command should print detailed information
  kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert rs 'frontend'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert rs 'frontend' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert rs 'frontend' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert rs
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert rs false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert rs true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"

  ### Scale replica set frontend with current-replicas and replicas
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
  # Command
  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'

  # Set up three deployments, two of which share the same label
  kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --selector
  kubectl scale deploy --replicas=2 -l run=hello
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --all
  kubectl scale deploy --replicas=3 --all
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
  # Clean-up
  kubectl delete rs frontend "${kube_flags[@]}"
  kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"

  ### Expose replica set as service
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
  # Command
  kubectl expose rs frontend --port=80 "${kube_flags[@]}"
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
  # Create a service using service/v1 generator
  kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
  # Post-condition: service exists and the port is named default.
  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
  # Cleanup services
  kubectl delete service frontend{,-2} "${kube_flags[@]}"

  # Test set commands
  # Pre-condition: frontend replica set exists at generation 1
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
  kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
  kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
  kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'

  ### Delete replica set with id
  # Pre-condition: frontend replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
  # Command
  kubectl delete rs frontend "${kube_flags[@]}"
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  ### Create two replica sets
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
  # Post-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'

  ### Delete multiple replica sets at once
  # Pre-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
  # Command
  kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''

  if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
    ### Auto scale replica set
    # Pre-condition: no replica set exists
    kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
    # Command
    kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
    kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
    # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
    kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
    kubectl delete hpa frontend "${kube_flags[@]}"
    # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
    kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
    kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
    kubectl delete hpa frontend "${kube_flags[@]}"
    # autoscale without specifying --max should fail
    ! kubectl autoscale rs frontend "${kube_flags[@]}"
    # Clean up
    kubectl delete rs frontend "${kube_flags[@]}"
  fi

  set +o nounset
  set +o errexit
}
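The history-related functions above repeatedly apply a manifest with --record and then assert on what kubectl rollout undo does. As a rough standalone sketch of that pattern with plain kubectl only (deployment.yaml and deployment-v2.yaml are illustrative manifests for revisions 1 and 2 of a Deployment named nginx, not files from this commit):

#!/usr/bin/env bash
# Sketch of the rollout-undo pattern without the kube::test helpers.
set -o errexit -o nounset -o pipefail

kubectl apply -f deployment.yaml          # revision 1
kubectl apply -f deployment-v2.yaml       # revision 2
kubectl rollout history deployment/nginx  # lists both revisions

# Roll back to revision 1 and wait for the rollout to complete.
kubectl rollout undo deployment/nginx --to-revision=1
kubectl rollout status deployment/nginx

# The pod template image should now match revision 1 again.
kubectl get deployment nginx -o jsonpath='{.spec.template.spec.containers[0].image}'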
vendor/k8s.io/kubernetes/test/cmd/authorization.sh (generated, vendored, new executable file): 81 lines
@ -0,0 +1,81 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_authorization_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing authorization"
|
||||
|
||||
  # check the remote authorization endpoint; kubectl doesn't actually display the returned object, so this isn't super useful,
|
||||
  # but it proves that the endpoint works
|
||||
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
|
||||
kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false
|
||||
|
||||
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
|
||||
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1beta1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json > "${SAR_RESULT_FILE}"
|
||||
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
|
||||
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
|
||||
else
|
||||
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
|
||||
exit 1
|
||||
fi
|
||||
rm "${SAR_RESULT_FILE}"
|
||||
|
||||
SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
|
||||
curl -k -H "Content-Type:" http://localhost:8080/apis/authorization.k8s.io/v1/subjectaccessreviews -XPOST -d @test/fixtures/pkg/kubectl/cmd/create/sar-v1.json > "${SAR_RESULT_FILE}"
|
||||
if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
|
||||
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
|
||||
else
|
||||
kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
|
||||
exit 1
|
||||
fi
|
||||
rm "${SAR_RESULT_FILE}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_impersonation_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing impersonation"
|
||||
|
||||
output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'without impersonating a user'
|
||||
|
||||
if kube::test::if_supports_resource "${csr}" ; then
|
||||
# --as
|
||||
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1
|
||||
kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
|
||||
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
|
||||
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
|
||||
|
||||
# --as-group
|
||||
kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
|
||||
kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
|
||||
kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
|
||||
kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
|
||||
fi
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
63
vendor/k8s.io/kubernetes/test/cmd/batch.sh
generated
vendored
Executable file
@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_job_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing job"
|
||||
|
||||
### Create a new namespace
|
||||
# Pre-condition: the test-jobs namespace does not exist
|
||||
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:' ':'
|
||||
# Command
|
||||
kubectl create namespace test-jobs
|
||||
# Post-condition: namespace 'test-jobs' is created.
|
||||
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'
|
||||
|
||||
### Create a cronjob in a specific namespace
|
||||
kubectl run pi --schedule="59 23 31 2 *" --namespace=test-jobs --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
|
||||
# Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert 'cronjob/pi --namespace=test-jobs' "{{$id_field}}" 'pi'
|
||||
kubectl get cronjob/pi --namespace=test-jobs
|
||||
kubectl describe cronjob/pi --namespace=test-jobs
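  # Note (added for clarity; not in the upstream script): the schedule "59 23 31 2 *"
  # means 23:59 on February 31st, a date that never occurs, so this CronJob can never
  # fire during the test; it exists purely as a template for 'kubectl create job --from'.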
|
||||
|
||||
### Create a job in dry-run mode
|
||||
output_message=$(kubectl create job test-job --from=cronjob/pi --dry-run=true --namespace=test-jobs -o name)
|
||||
# Post-condition: The text 'job.batch/test-job' should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'job.batch/test-job'
|
||||
  # Post-condition: the test-job was not actually created
|
||||
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" ''
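  # Note (added for clarity): with --dry-run=true the job object is only rendered and
  # its name printed via '-o name'; nothing is persisted, which is exactly what the
  # empty jobs list asserted just above verifies.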
|
||||
|
||||
### Create a job in a specific namespace
|
||||
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
|
||||
# Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert 'job/test-job --namespace=test-jobs' "{{$id_field}}" 'test-job'
|
||||
kubectl get job/test-job --namespace=test-jobs
|
||||
kubectl describe job/test-job --namespace=test-jobs
|
||||
#Clean up
|
||||
kubectl delete job test-job --namespace=test-jobs
|
||||
kubectl delete cronjob pi --namespace=test-jobs
|
||||
kubectl delete namespace test-jobs
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
63
vendor/k8s.io/kubernetes/test/cmd/certificate.sh
generated
vendored
Executable file
@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_certificates_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing certificates"
|
||||
|
||||
# approve
|
||||
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
|
||||
kubectl certificate approve foo "${kube_flags[@]}"
|
||||
kubectl get csr "${kube_flags[@]}" -o json
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
|
||||
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
|
||||
|
||||
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
|
||||
kubectl certificate approve -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kubectl get csr "${kube_flags[@]}" -o json
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Approved'
|
||||
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
|
||||
|
||||
# deny
|
||||
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
|
||||
kubectl certificate deny foo "${kube_flags[@]}"
|
||||
kubectl get csr "${kube_flags[@]}" -o json
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
|
||||
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
|
||||
|
||||
kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
|
||||
kubectl certificate deny -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kubectl get csr "${kube_flags[@]}" -o json
|
||||
kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' 'Denied'
|
||||
kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
1416
vendor/k8s.io/kubernetes/test/cmd/core.sh
generated
vendored
Executable file
File diff suppressed because it is too large
469
vendor/k8s.io/kubernetes/test/cmd/crd.sh
generated
vendored
Executable file
@ -0,0 +1,469 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_crd_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl crd"
|
||||
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
|
||||
{
|
||||
"kind": "CustomResourceDefinition",
|
||||
"apiVersion": "apiextensions.k8s.io/v1beta1",
|
||||
"metadata": {
|
||||
"name": "foos.company.com"
|
||||
},
|
||||
"spec": {
|
||||
"group": "company.com",
|
||||
"version": "v1",
|
||||
"scope": "Namespaced",
|
||||
"names": {
|
||||
"plural": "foos",
|
||||
"kind": "Foo"
|
||||
}
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
|
||||
  # Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'foos.company.com:'
|
||||
|
||||
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
|
||||
{
|
||||
"kind": "CustomResourceDefinition",
|
||||
"apiVersion": "apiextensions.k8s.io/v1beta1",
|
||||
"metadata": {
|
||||
"name": "bars.company.com"
|
||||
},
|
||||
"spec": {
|
||||
"group": "company.com",
|
||||
"version": "v1",
|
||||
"scope": "Namespaced",
|
||||
"names": {
|
||||
"plural": "bars",
|
||||
"kind": "Bar"
|
||||
}
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
|
||||
  # Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:'
|
||||
|
||||
# This test ensures that the name printer is able to output a resource
|
||||
# in the proper "kind.group/resource_name" format, and that the
|
||||
# resource builder is able to resolve a GVK when a kind.group pair is given.
|
||||
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
|
||||
{
|
||||
"kind": "CustomResourceDefinition",
|
||||
"apiVersion": "apiextensions.k8s.io/v1beta1",
|
||||
"metadata": {
|
||||
"name": "resources.mygroup.example.com"
|
||||
},
|
||||
"spec": {
|
||||
"group": "mygroup.example.com",
|
||||
"version": "v1alpha1",
|
||||
"scope": "Namespaced",
|
||||
"names": {
|
||||
"plural": "resources",
|
||||
"singular": "resource",
|
||||
"kind": "Kind",
|
||||
"listKind": "KindList"
|
||||
}
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
|
||||
# Post-Condition: assertion crd with non-matching kind and resource exists
|
||||
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\" \\\"resources.mygroup.example.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:'
|
||||
|
||||
# This test ensures that we can create complex validation without client-side validation complaining
|
||||
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
|
||||
{
|
||||
"kind": "CustomResourceDefinition",
|
||||
"apiVersion": "apiextensions.k8s.io/v1beta1",
|
||||
"metadata": {
|
||||
"name": "validfoos.company.com"
|
||||
},
|
||||
"spec": {
|
||||
"group": "company.com",
|
||||
"version": "v1",
|
||||
"scope": "Namespaced",
|
||||
"names": {
|
||||
"plural": "validfoos",
|
||||
"kind": "ValidFoo"
|
||||
},
|
||||
"validation": {
|
||||
"openAPIV3Schema": {
|
||||
"properties": {
|
||||
"spec": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "number"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
|
||||
  # Post-Condition: assertion CRD with validation exists
|
||||
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\" \\\"resources.mygroup.example.com\\\" \\\"validfoos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:validfoos.company.com:'
|
||||
|
||||
run_non_native_resource_tests
|
||||
|
||||
# teardown
|
||||
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
|
||||
kubectl delete customresourcedefinitions/bars.company.com "${kube_flags_with_token[@]}"
|
||||
kubectl delete customresourcedefinitions/resources.mygroup.example.com "${kube_flags_with_token[@]}"
|
||||
kubectl delete customresourcedefinitions/validfoos.company.com "${kube_flags_with_token[@]}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
kube::util::non_native_resources() {
|
||||
local times
|
||||
local wait
|
||||
local failed
|
||||
times=30
|
||||
wait=10
|
||||
local i
|
||||
for i in $(seq 1 $times); do
|
||||
failed=""
|
||||
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1' || failed=true
|
||||
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/foos' || failed=true
|
||||
kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/bars' || failed=true
|
||||
|
||||
if [ -z "${failed}" ]; then
|
||||
return 0
|
||||
fi
|
||||
sleep ${wait}
|
||||
done
|
||||
|
||||
kube::log::error "Timed out waiting for non-native-resources; tried ${times} waiting ${wait}s between each"
|
||||
return 1
|
||||
}
|
||||
|
||||
run_non_native_resource_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl non-native resources"
|
||||
kube::util::non_native_resources
|
||||
|
||||
# Test that we can list this new CustomResource (foos)
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can list this new CustomResource (bars)
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can list this new CustomResource (resources)
|
||||
kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can create a new resource of type Kind
|
||||
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
|
||||
|
||||
# Test that -o name returns kind.group/resourcename
|
||||
output_message=$(kubectl "${kube_flags[@]}" get resource/myobj -o name)
|
||||
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
|
||||
|
||||
output_message=$(kubectl "${kube_flags[@]}" get resources/myobj -o name)
|
||||
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
|
||||
|
||||
output_message=$(kubectl "${kube_flags[@]}" get kind.mygroup.example.com/myobj -o name)
|
||||
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
|
||||
|
||||
# Delete the resource with cascade.
|
||||
kubectl "${kube_flags[@]}" delete resources myobj --cascade=true
|
||||
|
||||
# Make sure it's gone
|
||||
kube::test::wait_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can create a new resource of type Foo
|
||||
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}"
|
||||
|
||||
# Test that we can list this new custom resource
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
|
||||
# Test alternate forms
|
||||
kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
|
||||
# Test all printers, with lists and individual items
|
||||
kube::log::status "Testing CustomResource printing"
|
||||
kubectl "${kube_flags[@]}" get foos
|
||||
kubectl "${kube_flags[@]}" get foos/test
|
||||
kubectl "${kube_flags[@]}" get foos -o name
|
||||
kubectl "${kube_flags[@]}" get foos/test -o name
|
||||
kubectl "${kube_flags[@]}" get foos -o wide
|
||||
kubectl "${kube_flags[@]}" get foos/test -o wide
|
||||
kubectl "${kube_flags[@]}" get foos -o json
|
||||
kubectl "${kube_flags[@]}" get foos/test -o json
|
||||
kubectl "${kube_flags[@]}" get foos -o yaml
|
||||
kubectl "${kube_flags[@]}" get foos/test -o yaml
|
||||
kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
|
||||
kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
|
||||
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
|
||||
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
|
||||
output_message=$(kubectl "${kube_flags[@]}" get foos/test -o name)
|
||||
kube::test::if_has_string "${output_message}" 'foo.company.com/test'
|
||||
|
||||
# Test patching
|
||||
kube::log::status "Testing CustomResource patching"
|
||||
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
|
||||
kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
|
||||
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
|
||||
kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
|
||||
kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
|
||||
kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
|
||||
# Get local version
|
||||
CRD_RESOURCE_FILE="${KUBE_TEMP}/crd-foos-test.json"
|
||||
kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
|
||||
# cannot apply strategic patch locally
|
||||
CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
|
||||
! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
|
||||
if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
|
||||
kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat ${CRD_PATCH_ERROR_FILE})"
|
||||
else
|
||||
kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${CRD_PATCH_ERROR_FILE})"
|
||||
exit 1
|
||||
fi
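  # Clarifying note (added; not part of the upstream test): custom resources carry no
  # Go struct patch metadata, so kubectl cannot compute its default strategic merge
  # patch for them and suggests '--type merge' instead, which is what the grep above
  # checks. Only JSON merge patch (or JSON patch) works against a custom resource, e.g.:
  #   kubectl patch foos/test --type=merge -p '{"patched":"value3"}'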
|
||||
# can apply merge patch locally
|
||||
kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
|
||||
# can apply merge patch remotely
|
||||
kubectl "${kube_flags[@]}" patch --record -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
|
||||
kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
|
||||
rm "${CRD_RESOURCE_FILE}"
|
||||
rm "${CRD_PATCH_ERROR_FILE}"
|
||||
|
||||
# Test labeling
|
||||
kube::log::status "Testing CustomResource labeling"
|
||||
kubectl "${kube_flags[@]}" label foos --all listlabel=true
|
||||
kubectl "${kube_flags[@]}" label foo/test itemlabel=true
|
||||
|
||||
# Test annotating
|
||||
kube::log::status "Testing CustomResource annotating"
|
||||
kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
|
||||
kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
|
||||
|
||||
# Test describing
|
||||
kube::log::status "Testing CustomResource describing"
|
||||
kubectl "${kube_flags[@]}" describe foos
|
||||
kubectl "${kube_flags[@]}" describe foos/test
|
||||
kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
|
||||
kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
|
||||
|
||||
# Delete the resource with cascade.
|
||||
kubectl "${kube_flags[@]}" delete foos test --cascade=true
|
||||
|
||||
# Make sure it's gone
|
||||
kube::test::wait_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can create a new resource of type Bar
|
||||
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}"
|
||||
|
||||
# Test that we can list this new custom resource
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
|
||||
# Test that we can watch the resource.
|
||||
# Start watcher in background with process substitution,
|
||||
# so we can read from stdout asynchronously.
|
||||
kube::log::status "Testing CustomResource watching"
|
||||
exec 3< <(kubectl "${kube_flags[@]}" get bars --request-timeout=1m --watch-only -o name & echo $! ; wait)
|
||||
local watch_pid
|
||||
read <&3 watch_pid
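  # Clarifying note (added): the process substitution above emits the watcher's PID as
  # its first line, so this read pulls that PID off fd 3 before any watch output
  # arrives; the trailing 'wait' keeps the substituted subshell (and thus fd 3) alive
  # until the watcher is killed further down.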
|
||||
|
||||
# We can't be sure when the watch gets established,
|
||||
# so keep triggering events (in the background) until something comes through.
|
||||
local tries=0
|
||||
while [ ${tries} -lt 10 ]; do
|
||||
tries=$((tries+1))
|
||||
kubectl "${kube_flags[@]}" patch bars/test -p "{\"patched\":\"${tries}\"}" --type=merge
|
||||
sleep 1
|
||||
done &
|
||||
local patch_pid=$!
|
||||
|
||||
# Wait up to 30s for a complete line of output.
|
||||
local watch_output
|
||||
read <&3 -t 30 watch_output
|
||||
# Stop the watcher and the patch loop.
|
||||
kill -9 ${watch_pid}
|
||||
kill -9 ${patch_pid}
|
||||
kube::test::if_has_string "${watch_output}" 'bar.company.com/test'
|
||||
|
||||
# Delete the resource without cascade.
|
||||
kubectl "${kube_flags[@]}" delete bars test --cascade=false
|
||||
|
||||
# Make sure it's gone
|
||||
kube::test::wait_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can create single item via apply
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
|
||||
|
||||
  # Test that we have created a foo named test
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
|
||||
# Test that the field has the expected value
|
||||
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
|
||||
|
||||
  # Test that applying an empty patch doesn't change fields
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo.yaml
|
||||
|
||||
# Test that the field has the same value after re-apply
|
||||
kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
|
||||
|
||||
  # Test that the subfield has the expected value
|
||||
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
|
||||
|
||||
# Update a subfield and then apply the change
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-updated-subfield.yaml
|
||||
|
||||
# Test that apply has updated the subfield
|
||||
kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
|
||||
|
||||
# Test that the field has the expected value
|
||||
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
|
||||
|
||||
# Delete a subfield and then apply the change
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-deleted-subfield.yaml
|
||||
|
||||
# Test that apply has deleted the field
|
||||
kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
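  # Clarifying note (added): apply can prune otherSubfield because the
  # kubectl.kubernetes.io/last-applied-configuration annotation records what the
  # previous apply set; fields present there but missing from the new manifest are
  # deleted on the next apply.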
|
||||
|
||||
# Test that the field does not exist
|
||||
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
|
||||
|
||||
# Add a field and then apply the change
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/foo-added-subfield.yaml
|
||||
|
||||
# Test that apply has added the field
|
||||
kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
|
||||
|
||||
# Delete the resource
|
||||
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/foo.yaml
|
||||
|
||||
# Make sure it's gone
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test that we can create list via apply
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
|
||||
|
||||
  # Test that we have created a foo and a bar from a list
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
|
||||
|
||||
# Test that the field has the expected value
|
||||
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
|
||||
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
|
||||
|
||||
  # Test that re-applying a list doesn't change anything
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list.yaml
|
||||
|
||||
# Test that the field has the same value after re-apply
|
||||
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
|
||||
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
|
||||
|
||||
# Test that the fields have the expected value
|
||||
kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
|
||||
kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
|
||||
|
||||
# Update fields and then apply the change
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-updated-field.yaml
|
||||
|
||||
# Test that apply has updated the fields
|
||||
kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
|
||||
kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
|
||||
|
||||
# Test that the field has the expected value
|
||||
kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
|
||||
kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
|
||||
|
||||
# Delete fields and then apply the change
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-deleted-field.yaml
|
||||
|
||||
# Test that apply has deleted the fields
|
||||
kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
|
||||
kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
|
||||
|
||||
  # Test that the fields do not exist
|
||||
kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
|
||||
kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
|
||||
|
||||
# Add a field and then apply the change
|
||||
kubectl "${kube_flags[@]}" apply -f hack/testdata/CRD/multi-crd-list-added-field.yaml
|
||||
|
||||
# Test that apply has added the field
|
||||
kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
|
||||
kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
|
||||
|
||||
# Delete the resource
|
||||
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/multi-crd-list.yaml
|
||||
|
||||
# Make sure it's gone
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
## kubectl apply --prune
|
||||
# Test that no foo or bar exist
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# apply --prune on foo.yaml that has foo/test
|
||||
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
|
||||
# check right crds exist
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# apply --prune on bar.yaml that has bar/test
|
||||
kubectl apply --prune -l pruneGroup=true -f hack/testdata/CRD/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
|
||||
# check right crds exist
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
|
||||
|
||||
# Delete the resource
|
||||
kubectl "${kube_flags[@]}" delete -f hack/testdata/CRD/bar.yaml
|
||||
|
||||
# Make sure it's gone
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Test 'kubectl create' with namespace, and namespace cleanup.
|
||||
kubectl "${kube_flags[@]}" create namespace non-native-resources
|
||||
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/bar.yaml --namespace=non-native-resources
|
||||
kube::test::get_object_assert bars '{{len .items}}' '1' --namespace=non-native-resources
|
||||
kubectl "${kube_flags[@]}" delete namespace non-native-resources
|
||||
# Make sure objects go away.
|
||||
kube::test::wait_object_assert bars '{{len .items}}' '0' --namespace=non-native-resources
|
||||
# Make sure namespace goes away.
|
||||
local tries=0
|
||||
while kubectl "${kube_flags[@]}" get namespace non-native-resources && [ ${tries} -lt 10 ]; do
|
||||
tries=$((tries+1))
|
||||
sleep ${tries}
|
||||
done
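  # Note (added for clarity): the loop above is a simple linear back-off; with sleeps
  # of 1..10 seconds it waits at most ~55s for namespace finalization and then moves
  # on without asserting.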
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
110
vendor/k8s.io/kubernetes/test/cmd/create.sh
generated
vendored
Executable file
@ -0,0 +1,110 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Runs tests related to kubectl create --filename(-f) --selector(-l).
|
||||
run_kubectl_create_filter_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl create filter"
|
||||
## kubectl create -f with label selector should only create matching objects
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# create
|
||||
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
|
||||
# check wrong pod doesn't exist
|
||||
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
|
||||
# cleanup
|
||||
kubectl delete pods selector-test-pod
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_kubectl_create_error_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl create with error"
|
||||
|
||||
# Passing no arguments to create is an error
|
||||
! kubectl create
|
||||
|
||||
## kubectl create should not panic on empty string lists in a template
|
||||
ERROR_FILE="${KUBE_TEMP}/validation-error"
|
||||
kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
|
||||
# Post-condition: should get an error reporting the empty string
|
||||
if grep -q "unknown object type \"nil\" in ReplicationController" "${ERROR_FILE}"; then
|
||||
kube::log::status "\"kubectl create with empty string list returns error as expected: $(cat ${ERROR_FILE})"
|
||||
else
|
||||
kube::log::status "\"kubectl create with empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})"
|
||||
exit 1
|
||||
fi
|
||||
rm "${ERROR_FILE}"
|
||||
|
||||
# Posting a pod to namespaces should fail. Also tests --raw forcing the post location
|
||||
[ "$( kubectl convert -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f - --v=8 2>&1 | grep 'cannot be handled as a Namespace: converting (v1.Pod)')" ]
|
||||
|
||||
[ "$( kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --edit 2>&1 | grep 'raw and --edit are mutually exclusive')" ]
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
# Runs kubectl create job tests
|
||||
run_create_job_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
|
||||
# Test kubectl create job
|
||||
kubectl create job test-job --image=k8s.gcr.io/nginx:test-cmd
|
||||
  # Post-Condition: job test-job is created with the nginx image
|
||||
kube::test::get_object_assert 'job test-job' "{{$image_field0}}" 'k8s.gcr.io/nginx:test-cmd'
|
||||
# Clean up
|
||||
kubectl delete job test-job "${kube_flags[@]}"
|
||||
|
||||
# Test kubectl create job with command
|
||||
kubectl create job test-job-pi "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)'
|
||||
kube::test::get_object_assert 'job test-job-pi' "{{$image_field0}}" $IMAGE_PERL
|
||||
# Clean up
|
||||
kubectl delete job test-job-pi
|
||||
|
||||
# Test kubectl create job from cronjob
|
||||
# Pre-Condition: create a cronjob
|
||||
kubectl run test-pi --schedule="* */5 * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(10)'
|
||||
kubectl create job my-pi --from=cronjob/test-pi
|
||||
# Post-condition: container args contain expected command
|
||||
output_message=$(kubectl get job my-pi -o go-template='{{(index .spec.template.spec.containers 0).args}}' "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "perl -Mbignum=bpi -wle print bpi(10)"
|
||||
|
||||
# Clean up
|
||||
kubectl delete job my-pi
|
||||
kubectl delete cronjob test-pi
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
42
vendor/k8s.io/kubernetes/test/cmd/diff.sh
generated
vendored
Executable file
@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Runs tests for kubectl diff
|
||||
run_kubectl_diff_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl diff"
|
||||
|
||||
# Test that it works when the live object doesn't exist
|
||||
output_message=$(! kubectl diff -f hack/testdata/pod.yaml)
|
||||
kube::test::if_has_string "${output_message}" 'test-pod'
|
||||
|
||||
kubectl apply -f hack/testdata/pod.yaml
|
||||
|
||||
output_message=$(! kubectl diff -f hack/testdata/pod-changed.yaml)
|
||||
kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0'
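  # Clarifying note (added; not in the upstream script): 'kubectl diff' exits non-zero
  # when it finds differences (or when the live object is missing), so both invocations
  # above are prefixed with '!' to capture the output without tripping errexit.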
|
||||
|
||||
kubectl delete -f hack/testdata/pod.yaml
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
130
vendor/k8s.io/kubernetes/test/cmd/discovery.sh
generated
vendored
Executable file
@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_RESTMapper_evaluation_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing RESTMapper"
|
||||
|
||||
RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
|
||||
|
||||
  ### Non-existent resource type should give a recognizable error
|
||||
# Pre-condition: None
|
||||
# Command
|
||||
kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
|
||||
if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
|
||||
kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
|
||||
else
|
||||
kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
|
||||
exit 1
|
||||
fi
|
||||
rm "${RESTMAPPER_ERROR_FILE}"
|
||||
# Post-condition: None
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_assert_short_name_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing assert short name"
|
||||
|
||||
kube::log::status "Testing propagation of short names for resources"
|
||||
output_message=$(kubectl get --raw=/api/v1)
|
||||
|
||||
## test if a short name is exported during discovery
|
||||
kube::test::if_has_string "${output_message}" '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_assert_categories_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing propagation of categories for resources"
|
||||
output_message=$(kubectl get --raw=/api/v1 | grep -o '"name":"pods"[^}]*}')
|
||||
kube::test::if_has_string "${output_message}" '"categories":\["all"\]'
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_resource_aliasing_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing resource aliasing"
|
||||
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml "${kube_flags[@]}"
|
||||
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/service.yaml "${kube_flags[@]}"
|
||||
|
||||
object="all -l'app=cassandra'"
|
||||
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
|
||||
|
||||
  # all 4 cassandras might not be in the response immediately...
|
||||
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
|
||||
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
|
||||
kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'
|
||||
|
||||
kubectl delete all -l app=cassandra "${kube_flags[@]}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_kubectl_explain_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing kubectl(v1:explain)"
|
||||
kubectl explain pods
|
||||
# shortcuts work
|
||||
kubectl explain po
|
||||
kubectl explain po.status.message
|
||||
  # cronjob works
|
||||
kubectl explain cronjob
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_swagger_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing swagger"
|
||||
|
||||
# Verify schema
|
||||
file="${KUBE_TEMP}/schema-v1.json"
|
||||
curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
|
||||
[[ "$(grep "list of returned" "${file}")" ]]
|
||||
[[ "$(grep "List of services" "${file}")" ]]
|
||||
[[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
472
vendor/k8s.io/kubernetes/test/cmd/generic-resources.sh
generated
vendored
Executable file
@ -0,0 +1,472 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_multi_resources_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl(v1:multiple resources)"
|
||||
|
||||
FILES="hack/testdata/multi-resource-yaml
|
||||
hack/testdata/multi-resource-list
|
||||
hack/testdata/multi-resource-json
|
||||
hack/testdata/multi-resource-rclist
|
||||
hack/testdata/multi-resource-svclist"
|
||||
YAML=".yaml"
|
||||
JSON=".json"
|
||||
for file in $FILES; do
|
||||
if [ -f $file$YAML ]
|
||||
then
|
||||
file=$file$YAML
|
||||
replace_file="${file%.yaml}-modify.yaml"
|
||||
else
|
||||
file=$file$JSON
|
||||
replace_file="${file%.json}-modify.json"
|
||||
fi
|
||||
|
||||
has_svc=true
|
||||
has_rc=true
|
||||
two_rcs=false
|
||||
two_svcs=false
|
||||
if [[ "${file}" == *rclist* ]]; then
|
||||
has_svc=false
|
||||
two_rcs=true
|
||||
fi
|
||||
if [[ "${file}" == *svclist* ]]; then
|
||||
has_rc=false
|
||||
two_svcs=true
|
||||
fi
|
||||
|
||||
    ### Create, get, describe, replace, label, annotate, and then delete the mock service and mock replication controller from 5 types of files:
|
||||
### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
|
||||
### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
|
||||
echo "Testing with file ${file} and replace with file ${replace_file}"
|
||||
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f "${file}" "${kube_flags[@]}"
|
||||
# Post-condition: mock service (and mock2) exists
|
||||
if [ "$has_svc" = true ]; then
|
||||
if [ "$two_svcs" = true ]; then
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
|
||||
else
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
|
||||
fi
|
||||
fi
|
||||
# Post-condition: mock rc (and mock2) exists
|
||||
if [ "$has_rc" = true ]; then
|
||||
if [ "$two_rcs" = true ]; then
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
|
||||
else
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
|
||||
fi
|
||||
fi
|
||||
# Command
|
||||
kubectl get -f "${file}" "${kube_flags[@]}"
|
||||
# Command: watching multiple resources should return "not supported" error
|
||||
WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
|
||||
kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true
|
||||
if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
|
||||
kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1"
|
||||
fi
|
||||
kubectl describe -f "${file}" "${kube_flags[@]}"
|
||||
# Command
|
||||
kubectl replace -f $replace_file --force --cascade "${kube_flags[@]}"
|
||||
# Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
|
||||
if [ "$has_svc" = true ]; then
|
||||
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
|
||||
if [ "$two_svcs" = true ]; then
|
||||
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
|
||||
fi
|
||||
fi
|
||||
if [ "$has_rc" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
|
||||
if [ "$two_rcs" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
|
||||
fi
|
||||
fi
|
||||
# Command: kubectl edit multiple resources
|
||||
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
|
||||
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}"
|
||||
chmod +x "${temp_editor}"
|
||||
EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
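    # Clarifying note (added): pointing EDITOR at the generated sed script keeps
    # 'kubectl edit' fully non-interactive; the "editor" simply rewrites
    # 'status: replaced' to 'status: edited' in the temp file kubectl hands it.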
|
||||
# Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
|
||||
if [ "$has_svc" = true ]; then
|
||||
kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
|
||||
if [ "$two_svcs" = true ]; then
|
||||
kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
|
||||
fi
|
||||
fi
|
||||
if [ "$has_rc" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
|
||||
if [ "$two_rcs" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
|
||||
fi
|
||||
fi
|
||||
# cleaning
|
||||
rm "${temp_editor}"
|
||||
# Command
|
||||
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
|
||||
# fails on some, but not all, of the resources, retries will fail because it tries to modify
|
||||
# existing labels.
|
||||
kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
|
||||
# Post-condition: mock service and mock rc (and mock2) are labeled
|
||||
if [ "$has_svc" = true ]; then
|
||||
kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
|
||||
if [ "$two_svcs" = true ]; then
|
||||
kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
|
||||
fi
|
||||
fi
|
||||
if [ "$has_rc" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
|
||||
if [ "$two_rcs" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
|
||||
fi
|
||||
fi
|
||||
# Command
|
||||
|
||||
# We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
|
||||
# fails on some, but not all, of the resources, retries will fail because it tries to modify
|
||||
# existing annotations.
|
||||
kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"
|
||||
# Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
|
||||
if [ "$has_svc" = true ]; then
|
||||
kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
|
||||
if [ "$two_svcs" = true ]; then
|
||||
kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
|
||||
fi
|
||||
fi
|
||||
if [ "$has_rc" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
|
||||
if [ "$two_rcs" = true ]; then
|
||||
kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
|
||||
fi
|
||||
fi
|
||||
# Cleanup resources created
|
||||
kubectl delete -f "${file}" "${kube_flags[@]}"
|
||||
done
|
||||
|
||||
#############################
|
||||
# Multiple Resources via URL#
|
||||
#############################
|
||||
|
||||
# Pre-condition: no service (other than default kubernetes services) or replication controller exists
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
# Command
|
||||
kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
|
||||
|
||||
# Post-condition: service(mock) and rc(mock) exist
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
|
||||
|
||||
# Clean up
|
||||
kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
|
||||
|
||||
# Post-condition: no service (other than default kubernetes services) or replication controller exists
|
||||
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_recursive_resources_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing recursive resources"
|
||||
### Create multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: no POD exists
|
||||
create_and_use_new_namespace
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
|
||||
|
||||
## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
echo -e '#!/usr/bin/env bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
|
||||
chmod +x /tmp/tmp-editor.sh
|
||||
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
|
||||
  # busybox0 & busybox1 PODs are not edited because the editor tries to load all objects in
|
||||
  # a list, but since the list contains invalid objects, it never opens.
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
# cleaning
|
||||
rm /tmp/tmp-editor.sh
|
||||
|
||||
## Replace multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
|
||||
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
|
||||
|
||||
## Describe multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
|
||||
kube::test::if_has_string "${output_message}" "app=busybox0"
|
||||
kube::test::if_has_string "${output_message}" "app=busybox1"
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
|
||||
## Annotate multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
|
||||
## Apply multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
|
||||
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
### Convert deployment YAML file locally without affecting the live deployment.
|
||||
# Pre-condition: no deployments exist
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
# Create a deployment (revision 1)
|
||||
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
|
||||
# Command
|
||||
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o yaml "${kube_flags[@]}")
|
||||
# Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
|
||||
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
|
||||
kube::test::if_has_string "${output_message}" "apps/v1beta1"
|
||||
# Clean up
|
||||
kubectl delete deployment nginx "${kube_flags[@]}"
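
# Illustrative note (hedged, using the deprecated `kubectl convert` of this
# release): --local converts only the client-side file, so
#   kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o yaml
# prints an apps/v1beta1 manifest without touching the live extensions/v1beta1
# object, which is exactly what the apiVersion assertions above rely on.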
|
||||
|
||||
## Convert multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
|
||||
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
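
# Hedged example of the template used above: -o go-template renders every item
# that could be decoded, so against this fixture directory something like
#   kubectl get -f hack/testdata/recursive/pod --recursive -o go-template="{{range.items}}{{.metadata.name}}:{{end}}"
# is expected to print 'busybox0:busybox1:' on stdout while the malformed
# busybox2 only surfaces as the "Object 'Kind' is missing" error on stderr.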
## Label multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
|
||||
echo "${output_message}"
|
||||
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
|
||||
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
|
||||
## Patch multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
|
||||
echo "${output_message}"
|
||||
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controller recursively from directory of YAML files
|
||||
# Pre-condition: no replication controller exists
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
|
||||
# Post-condition: frontend replication controller is created
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
|
||||
### Autoscale multiple replication controllers recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
|
||||
# replica each
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
|
||||
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
|
||||
# Command
|
||||
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 replication controllers are autoscaled
|
||||
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
|
||||
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
kubectl delete hpa busybox0 "${kube_flags[@]}"
|
||||
kubectl delete hpa busybox1 "${kube_flags[@]}"
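
# Note on the expected '1 2 80' above (hedged, based on the defaults of this
# release): when --cpu-percent is not given, autoscale records the default 80%
# CPU target, so an equivalent explicit invocation would look like
#   kubectl autoscale rc busybox0 --min=1 --max=2 --cpu-percent=80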
|
||||
|
||||
### Expose multiple replication controllers as service recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
|
||||
# replica each
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
|
||||
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
|
||||
# Command
|
||||
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 services exist with an unnamed port 80, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
|
||||
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
|
||||
### Scale multiple replication controllers recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
|
||||
# replica each
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
|
||||
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
|
||||
# Command
|
||||
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
|
||||
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
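
# Illustrative sketch: --current-replicas is a precondition, so the scale above
# only succeeds because each controller really has 1 replica; a mismatch such as
#   kubectl scale --current-replicas=5 --replicas=2 rc busybox0
# would be expected to fail instead of silently rescaling.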
|
||||
|
||||
### Delete multiple busybox replication controllers recursively from directory of YAML files
|
||||
# Pre-condition: busybox0 & busybox1 PODs exist
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
|
||||
### Rollout on multiple deployments recursively
|
||||
# Pre-condition: no deployments exist
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
# Create deployments (revision 1) recursively from directory of YAML files
|
||||
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
|
||||
## Rollback the deployments to revision 1 recursively
|
||||
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: the rollback is a no-op for nginx0 & nginx1, and since nginx2 is malformed, it should error
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
## Pause the deployments recursively
|
||||
PRESERVE_ERR_FILE=true
|
||||
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
|
||||
output_message=$(cat ${ERROR_FILE})
|
||||
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
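
# Hedged note: kubectl-with-retry and ERROR_FILE are helpers provided by the
# test harness (see legacy-script.sh); with PRESERVE_ERR_FILE=true the stderr of
# the retried command is kept on disk so it can be inspected afterwards, roughly
#   output_message=$(cat "${ERROR_FILE}")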
|
||||
## Resume the deployments recursively
|
||||
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
|
||||
output_message=$(cat ${ERROR_FILE})
|
||||
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
## Retrieve the rollout history of the deployments recursively
|
||||
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
|
||||
kube::test::if_has_string "${output_message}" "nginx0-deployment"
|
||||
kube::test::if_has_string "${output_message}" "nginx1-deployment"
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
# Clean up
|
||||
unset PRESERVE_ERR_FILE
|
||||
rm "${ERROR_FILE}"
|
||||
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
|
||||
sleep 1
|
||||
|
||||
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
|
||||
# Pre-condition: no replication controller exists
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
# Create replication controllers recursively from directory of YAML files
|
||||
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
|
||||
# Command
|
||||
## Attempt to rollback the replication controllers to revision 1 recursively
|
||||
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
|
||||
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for "ReplicationController"'
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
## Attempt to pause the replication controllers recursively
|
||||
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
|
||||
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
|
||||
## Attempt to resume the replication controllers recursively
|
||||
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
|
||||
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
|
||||
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
|
||||
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" resuming is not supported'
|
||||
# Clean up
|
||||
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
|
||||
sleep 1
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_lists_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl(v1:lists)"
|
||||
|
||||
### Create a List with objects from multiple versions
|
||||
# Command
|
||||
kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"
|
||||
|
||||
### Delete the List with objects from multiple versions
|
||||
# Command
|
||||
kubectl delete service/list-service-test deployment/list-deployment-test
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
362
vendor/k8s.io/kubernetes/test/cmd/get.sh
generated
vendored
Executable file
362
vendor/k8s.io/kubernetes/test/cmd/get.sh
generated
vendored
Executable file
@ -0,0 +1,362 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_kubectl_get_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl get"
|
||||
### Test retrieval of non-existing pods
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: POD abc should error since it doesn't exist
|
||||
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
|
||||
|
||||
### Test retrieval of non-existing POD with output flag specified
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
|
||||
# Post-condition: POD abc should error since it doesn't exist
|
||||
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
|
||||
|
||||
### Test retrieval of pods when none exist with non-human readable output format flag specified
|
||||
# Pre-condition: no pods exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
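
# Hedged illustration of the distinction tested above: structured printers emit
# an (empty) document rather than the human hint, e.g. with no pods
#   kubectl get pods -o json
# is expected to print a List with an empty "items" array, whereas the plain
# human-readable `kubectl get pods` is the form that prints 'No resources found'.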
|
||||
|
||||
### Test retrieval of pods when none exist, with human-readable output format flag specified
|
||||
# Pre-condition: no pods exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
output_message=$(! kubectl get foobar 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "No resources found" should not be part of the output when an error occurs
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "No resources found" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "No resources found" should not be part of the output
|
||||
kube::test::if_has_not_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
|
||||
# Post-condition: The text "No resources found" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'No resources found'
|
||||
|
||||
### Test retrieval of non-existing POD with json output flag specified
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
|
||||
# Post-condition: POD abc should error since it doesn't exist
|
||||
kube::test::if_has_string "${output_message}" 'pods "abc" not found'
|
||||
# Post-condition: make sure we don't display an empty List
|
||||
if kube::test::if_has_string "${output_message}" 'List'; then
|
||||
echo 'Unexpected List output'
|
||||
echo "${LINENO} $(basename $0)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
### Test kubectl get all
|
||||
output_message=$(kubectl --v=6 --namespace default get all --chunk-size=0 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: Check if we get 200 OK from all the url(s)
|
||||
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/daemonsets 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/deployments 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/replicasets 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/statefulsets 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
|
||||
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
|
||||
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/daemonsets 200 OK"
|
||||
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
|
||||
kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
|
||||
|
||||
### Test kubectl get chunk size
|
||||
output_message=$(kubectl --v=6 get clusterrole --chunk-size=10 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: Check if we get a limit and continue
|
||||
kube::test::if_has_string "${output_message}" "/clusterroles?limit=10 200 OK"
|
||||
kube::test::if_has_string "${output_message}" "/v1/clusterroles?continue="
|
||||
|
||||
### Test kubectl get chunk size defaults to 500
|
||||
output_message=$(kubectl --v=6 get clusterrole 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: Check if we get a limit and continue
|
||||
kube::test::if_has_string "${output_message}" "/clusterroles?limit=500 200 OK"
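
# Hedged sketch of what the verbose traces above should contain: chunked gets
# are ordinary list pagination, roughly
#   GET .../clusterroles?limit=10
#   GET .../clusterroles?limit=10&continue=<token from the previous page>
# and omitting --chunk-size falls back to the default page size of 500.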
|
||||
|
||||
### Test kubectl get chunk size does not result in a --watch error when resource list is served in multiple chunks
|
||||
# Pre-condition: ConfigMaps one, two, and three do not exist
|
||||
kube::test::get_object_assert 'configmaps' '{{range.items}}{{ if eq $id_field \"one\" }}found{{end}}{{end}}:' ':'
|
||||
kube::test::get_object_assert 'configmaps' '{{range.items}}{{ if eq $id_field \"two\" }}found{{end}}{{end}}:' ':'
|
||||
kube::test::get_object_assert 'configmaps' '{{range.items}}{{ if eq $id_field \"three\" }}found{{end}}{{end}}:' ':'
|
||||
|
||||
# Command: create three configmaps and ensure that we can --watch them with a --chunk-size of 1
|
||||
kubectl create cm one "${kube_flags[@]}"
|
||||
kubectl create cm two "${kube_flags[@]}"
|
||||
kubectl create cm three "${kube_flags[@]}"
|
||||
output_message=$(kubectl get configmap --chunk-size=1 --watch --request-timeout=1s 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
|
||||
output_message=$(kubectl get configmap --chunk-size=1 --watch-only --request-timeout=1s 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
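
# Hedged note on the --watch checks above: with --chunk-size=1 the initial list
# arrives in several pages, and the point of the assertions is that kubectl can
# still start a single watch afterwards, i.e.
#   kubectl get configmap --chunk-size=1 --watch --request-timeout=1s
# should simply time out rather than print the old
# "watch is only supported on individual resources" error.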
|
||||
|
||||
### Test --allow-missing-template-keys
|
||||
# Pre-condition: no POD exists
|
||||
create_and_use_new_namespace
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
|
||||
# Post-condition: valid-pod POD is created
|
||||
kubectl get "${kube_flags[@]}" pods -o json
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
|
||||
## check --allow-missing-template-keys defaults to true for jsonpath templates
|
||||
kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
|
||||
|
||||
## check --allow-missing-template-keys defaults to true for go templates
|
||||
kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
|
||||
|
||||
## check --template flag causes go-template to be printed, even when no --output value is provided
|
||||
output_message=$(kubectl get "${kube_flags[@]}" pod valid-pod --template="{{$id_field}}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
|
||||
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'missing is not found'
|
||||
|
||||
## check --allow-missing-template-keys=false results in an error for a missing key with go
|
||||
output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
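
# Hedged recap of the two template engines exercised above: with the default
# --allow-missing-template-keys=true a missing field renders as empty output,
#   kubectl get pod valid-pod -o jsonpath='{.missing}'       # prints nothing
#   kubectl get pod valid-pod -o go-template='{{.missing}}'  # prints <no value>
# and only the =false form turns the missing key into a hard error.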
|
||||
|
||||
### Test kubectl get watch
|
||||
output_message=$(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'STATUS' # headers
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
|
||||
output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
|
||||
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
|
||||
kube::test::if_has_string "${output_message}" 'pod/valid-pod' # resource name
|
||||
output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
|
||||
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
|
||||
kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
|
||||
output_message=$(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" '"invalid-pod" not found'
|
||||
|
||||
# cleanup
|
||||
kubectl delete pods valid-pod "${kube_flags[@]}"
|
||||
|
||||
### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
|
||||
# Post-condition: PODs redis-master and valid-pod exist
|
||||
|
||||
# Check that all items in the list are printed
|
||||
output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "redis-master valid-pod"
|
||||
|
||||
# cleanup
|
||||
kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
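
# Hedged illustration of the check above: -f with a file containing a List feeds
# every decoded item to the printer, so
#   kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}"
# is expected to print both names, e.g. 'redis-master valid-pod'.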
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_retrieve_multiple_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
# switch back to the default namespace
|
||||
kubectl config set-context "${CONTEXT}" --namespace=""
|
||||
kube::log::status "Testing kubectl(v1:multiget)"
|
||||
kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_kubectl_sort_by_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing kubectl --sort-by"
|
||||
|
||||
### sort-by should not panic if no pod exists
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl get pods --sort-by="{metadata.name}"
|
||||
kubectl get pods --sort-by="{metadata.creationTimestamp}"
|
||||
|
||||
### sort-by should work if a pod exists
|
||||
# Create POD
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
|
||||
# Post-condition: valid-pod is created
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
# Check output of sort-by
|
||||
output_message=$(kubectl get pods --sort-by="{metadata.name}")
|
||||
kube::test::if_has_string "${output_message}" "valid-pod"
|
||||
# ensure sort-by receives objects as a Table
|
||||
output_message=$(kubectl get pods --v=8 --sort-by="{metadata.name}" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" "as=Table"
|
||||
# ensure sort-by requests the full object
|
||||
kube::test::if_has_string "${output_message}" "includeObject=Object"
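
# Hedged note on the two assertions above: with server-side printing kubectl
# asks the apiserver for a Table but still needs the full objects to evaluate
# the sort key, so the --v=8 trace should show request parameters along the
# lines of '...&includeObject=Object' together with an 'as=Table' Accept header.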
|
||||
### Clean up
|
||||
# Pre-condition: valid-pod exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
# Command
|
||||
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
|
||||
# Post-condition: valid-pod doesn't exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
### sort-by should work when sorting by name
|
||||
# Create three PODs
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod1.yaml
|
||||
# Post-condition: sorted-pod1 is created
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:'
|
||||
# Command
|
||||
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod2.yaml
|
||||
# Post-condition: sorted-pod2 is created
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:'
|
||||
# Command
|
||||
kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod3.yaml
|
||||
# Post-condition: sorted-pod3 is created
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
|
||||
|
||||
# Check output of sort-by '{metadata.name}'
|
||||
output_message=$(kubectl get pods --sort-by="{metadata.name}")
|
||||
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
|
||||
|
||||
# Check output of sort-by '{metadata.labels.name}'
|
||||
output_message=$(kubectl get pods --sort-by="{metadata.labels.name}")
|
||||
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod3:sorted-pod2:sorted-pod1:"
|
||||
|
||||
# if sorting, we should be able to use any field in our objects
|
||||
output_message=$(kubectl get pods --sort-by="{spec.containers[0].name}")
|
||||
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod2:sorted-pod1:sorted-pod3:"
|
||||
|
||||
# ensure sorting by creation timestamps works
|
||||
output_message=$(kubectl get pods --sort-by="{metadata.creationTimestamp}")
|
||||
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
|
||||
|
||||
# ensure sorting using fallback codepath still works
|
||||
output_message=$(kubectl get pods --sort-by="{spec.containers[0].name}" --server-print=false --v=8 2>&1)
|
||||
kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod2:sorted-pod1:sorted-pod3:"
|
||||
kube::test::if_has_not_string "${output_message}" "Table"
|
||||
|
||||
### Clean up
|
||||
# Pre-condition: the three sorted-pod PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
|
||||
# Command
|
||||
kubectl delete "${kube_flags[@]}" pod --grace-period=0 --force --all
|
||||
# Post-condition: no PODs exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_kubectl_all_namespace_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing kubectl --all-namespace"
|
||||
|
||||
# Pre-condition: the "default" namespace exists
|
||||
kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
|
||||
|
||||
### Create POD
|
||||
# Pre-condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
|
||||
# Post-condition: valid-pod is created
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
|
||||
### Verify a specific namespace is ignored when all-namespaces is provided
|
||||
# Command
|
||||
kubectl get pods --all-namespaces --namespace=default
|
||||
|
||||
### Check --all-namespaces option shows namespaces
|
||||
# Create objects in multiple namespaces
|
||||
kubectl create "${kube_flags[@]}" namespace all-ns-test-1
|
||||
kubectl create "${kube_flags[@]}" serviceaccount test -n all-ns-test-1
|
||||
kubectl create "${kube_flags[@]}" namespace all-ns-test-2
|
||||
kubectl create "${kube_flags[@]}" serviceaccount test -n all-ns-test-2
|
||||
# Ensure listing across namespaces displays the namespace
|
||||
output_message=$(kubectl get serviceaccounts --all-namespaces "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "all-ns-test-1"
|
||||
kube::test::if_has_string "${output_message}" "all-ns-test-2"
|
||||
# Clean up
|
||||
kubectl delete "${kube_flags[@]}" namespace all-ns-test-1
|
||||
kubectl delete "${kube_flags[@]}" namespace all-ns-test-2
|
||||
|
||||
### Clean up
|
||||
# Pre-condition: valid-pod exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
# Command
|
||||
kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
|
||||
# Post-condition: valid-pod doesn't exist
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
### Verify flag all-namespaces is ignored for rootScoped resources
|
||||
# Pre-condition: node exists
|
||||
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
|
||||
# Command
|
||||
output_message=$(kubectl get nodes --all-namespaces 2>&1)
|
||||
# Post-condition: output with no NAMESPACE field
|
||||
kube::test::if_has_not_string "${output_message}" "NAMESPACE"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
285
vendor/k8s.io/kubernetes/test/cmd/initializers.sh
generated
vendored
Executable file
285
vendor/k8s.io/kubernetes/test/cmd/initializers.sh
generated
vendored
Executable file
@ -0,0 +1,285 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_initializer_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing --include-uninitialized"
|
||||
|
||||
### Create a deployment
|
||||
kubectl create --request-timeout=1 -f hack/testdata/initializer-deployments.yaml 2>&1 "${kube_flags[@]}" || true
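
# Hedged explanation of the '--request-timeout=1 ... || true' pattern used here:
# no initializer controller runs in this test environment, so objects created
# with a pending initializer stay uninitialized and kubectl create never sees
# them become ready; the short timeout plus '|| true' lets the script continue,
# e.g.
#   kubectl create --request-timeout=1 -f <manifest-with-initializer> || true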
|
||||
|
||||
### Test kubectl get --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl get deployments 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "No resources found" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "No resources found" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl get deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: the uninitialized deployment "web" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'web'
|
||||
# Command
|
||||
output_message=$(kubectl get deployments web 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: the deployment "web" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'web'
|
||||
# Post-condition: The text "No resources found" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'No resources found'
|
||||
|
||||
### Test kubectl describe --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl describe deployments 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "run=web" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'run=web'
|
||||
# Command
|
||||
output_message=$(kubectl describe deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "run=web" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'run=web'
|
||||
# Command
|
||||
output_message=$(kubectl describe deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl describe deployments web --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "run=web" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'run=web'
|
||||
# Command
|
||||
output_message=$(kubectl describe deployments web --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "run=web" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'run=web'
|
||||
|
||||
### Test kubectl label --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl label deployments labelkey1=labelvalue1 --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: web is labelled
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
|
||||
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey1}}" 'labelvalue1'
|
||||
# Command
|
||||
output_message=$(kubectl label deployments labelkey2=labelvalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl label deployments labelkey3=labelvalue3 -l run=web 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl label deployments labelkey4=labelvalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: web is labelled
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
|
||||
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey4}}" 'labelvalue4'
|
||||
# Command
|
||||
output_message=$(kubectl label deployments labelkey5=labelvalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl label deployments labelkey6=labelvalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: web is labelled
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
|
||||
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey6}}" 'labelvalue6'
|
||||
# Command
|
||||
output_message=$(kubectl label deployments web labelkey7=labelvalue7 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: web is labelled
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
|
||||
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey7}}" 'labelvalue7'
|
||||
# Post-condition: all labels applied above are present
|
||||
kube::test::get_object_assert 'deployments web' "{{${labels_field}}}" 'map[labelkey1:labelvalue1 labelkey4:labelvalue4 labelkey6:labelvalue6 labelkey7:labelvalue7 run:web]'
|
||||
|
||||
### Test kubectl annotate --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments annotatekey1=annotatevalue1 --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: DEPLOYMENT has annotation
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
|
||||
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey1}}" 'annotatevalue1'
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments annotatekey2=annotatevalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments annotatekey3=annotatevalue3 -l run=web 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments annotatekey4=annotatevalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: DEPLOYMENT has annotation
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
|
||||
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey4}}" 'annotatevalue4'
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments annotatekey5=annotatevalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments annotatekey6=annotatevalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: DEPLOYMENT has annotation
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
|
||||
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey6}}" 'annotatevalue6'
|
||||
# Command
|
||||
output_message=$(kubectl annotate deployments web annotatekey7=annotatevalue7 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: web DEPLOYMENT has annotation
|
||||
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
|
||||
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey7}}" 'annotatevalue7'
|
||||
|
||||
### Test kubectl edit --include-uninitialized
|
||||
[ "$(EDITOR=cat kubectl edit deployments 2>&1 "${kube_flags[@]}" | grep 'edit cancelled, no objects found')" ]
|
||||
[ "$(EDITOR=cat kubectl edit deployments --include-uninitialized 2>&1 "${kube_flags[@]}" | grep 'Edit cancelled, no changes made.')" ]
|
||||
|
||||
### Test kubectl set image --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl set image deployments *=nginx:1.11 --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "image updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'image updated'
|
||||
# Command
|
||||
output_message=$(kubectl set image deployments *=nginx:1.11 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl set image deployments *=nginx:1.11 -l run=web 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl set image deployments *=nginx:1.12 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "image updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'image updated'
|
||||
# Command
|
||||
output_message=$(kubectl set image deployments *=nginx:1.13 -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "image updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'image updated'
|
||||
|
||||
### Test kubectl set resources --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "resource requirements updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'resource requirements updated'
|
||||
# Command
|
||||
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi -l run=web 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=200m,memory=256Mi -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "resource requirements updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'resource requirements updated'
|
||||
# Command
|
||||
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=512Mi -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "resource requirements updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'resource requirements updated'
|
||||
|
||||
### Test kubectl set selector --include-uninitialized
|
||||
# Create a service with an initializer
|
||||
kubectl create --request-timeout=1 -f hack/testdata/initializer-redis-master-service.yaml 2>&1 "${kube_flags[@]}" || true
|
||||
# Command
|
||||
output_message=$(kubectl set selector services role=padawan --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "selector updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'selector updated'
|
||||
# Command
|
||||
output_message=$(kubectl set selector services role=padawan --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
|
||||
### Test kubectl set subject --include-uninitialized
|
||||
# Create a clusterrolebinding with an initializer
|
||||
kubectl create --request-timeout=1 -f hack/testdata/initializer-clusterrolebinding.yaml 2>&1 "${kube_flags[@]}" || true
|
||||
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
|
||||
# Command
|
||||
output_message=$(kubectl set subject clusterrolebinding --user=foo --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "subjects updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'subjects updated'
|
||||
# Command
|
||||
output_message=$(kubectl set subject clusterrolebinding --user=foo --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
# Command
|
||||
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "subjects updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'subjects updated'
|
||||
# Command
|
||||
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "subjects updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'subjects updated'
|
||||
|
||||
### Test kubectl set serviceaccount --include-uninitialized
|
||||
# Command
|
||||
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "serviceaccount updated" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'serviceaccount updated'
|
||||
# Command
|
||||
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The output should be empty
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
|
||||
### Test kubectl delete --include-uninitialized
|
||||
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
|
||||
# Command
|
||||
output_message=$(kubectl delete clusterrolebinding --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "No resources found" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'No resources found'
|
||||
# Command
|
||||
output_message=$(kubectl delete clusterrolebinding --all 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: The text "deleted" should be part of the output
|
||||
kube::test::if_has_string "${output_message}" 'deleted'
|
||||
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
### Test kubectl apply --include-uninitialized
|
||||
# Pre-Condition: no POD exists
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# apply pod a
|
||||
kubectl apply --prune --request-timeout=20 --include-uninitialized=false --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
|
||||
# Post-condition: Other uninitialized resources should not be pruned
|
||||
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" 'web'
|
||||
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
|
||||
# cleanup
|
||||
kubectl delete pod a
|
||||
# apply pod a and prune uninitialized deployments web
|
||||
kubectl apply --prune --request-timeout=20 --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
|
||||
# Post-condition: Other uninitialized resources should not be pruned
|
||||
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
|
||||
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
|
||||
# cleanup
|
||||
kubectl delete pod a
|
||||
# apply pod a and prune uninitialized deployments web
|
||||
kubectl apply --prune --request-timeout=20 --include-uninitialized --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
|
||||
# check right pod exists
|
||||
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
|
||||
# Post-condition: Other uninitialized resources should not be pruned
|
||||
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
|
||||
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
|
||||
# cleanup
|
||||
kubectl delete pod a
|
||||
kubectl delete --request-timeout=1 deploy web
|
||||
kubectl delete --request-timeout=1 service redis-master
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
96
vendor/k8s.io/kubernetes/test/cmd/kubeconfig.sh
generated
vendored
Executable file
96
vendor/k8s.io/kubernetes/test/cmd/kubeconfig.sh
generated
vendored
Executable file
@ -0,0 +1,96 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_kubectl_config_set_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl(v1:config set)"
|
||||
|
||||
kubectl config set-cluster test-cluster --server="https://does-not-work"
|
||||
|
||||
# Get the API server cert and prepend a comment so the leading dashes are not parsed as a flag
|
||||
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
|
||||
|
||||
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
|
||||
r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
|
||||
|
||||
encoded=$(echo -n "$cert_data" | base64)
|
||||
kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
|
||||
e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
|
||||
|
||||
test "$e_written" == "$r_written"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_client_config_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing client config"
|
||||
|
||||
# Command
|
||||
# Pre-condition: kubeconfig "missing" is not a file or directory
|
||||
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
|
||||
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
|
||||
|
||||
# Pre-condition: kubeconfig "missing" is not a file or directory
|
||||
# Command
|
||||
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
|
||||
# Post-condition: --user contains a valid / empty value, missing config file returns error
|
||||
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
|
||||
# Command
|
||||
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
|
||||
# Post-condition: --cluster contains a "valid" value, missing config file returns error
|
||||
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
|
||||
|
||||
# Pre-condition: context "missing-context" does not exist
|
||||
# Command
|
||||
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'context was not found for specified context: missing-context'
|
||||
# Post-condition: invalid or missing context returns error
|
||||
|
||||
# Pre-condition: cluster "missing-cluster" does not exist
|
||||
# Command
|
||||
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'no server found for cluster "missing-cluster"'
|
||||
# Post-condition: invalid or missing cluster returns error
|
||||
|
||||
# Pre-condition: user "missing-user" does not exist
|
||||
# Command
|
||||
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
|
||||
# Post-condition: invalid or missing user returns error
|
||||
|
||||
# test invalid config
|
||||
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
|
||||
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
|
||||
kube::test::if_has_string "${output_message}" "Error loading config file"
|
||||
|
||||
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'no such file or directory'
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
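# --- Illustrative sketch (not part of the vendored script): the config-set
# test above relies on `kubectl config set ... --set-raw-bytes` base64-encoding
# raw input so that it matches a value the caller encoded by hand. A
# stand-alone check of that equivalence, using hypothetical cluster names:
demo_config_set_roundtrip() {
  local raw="dummy-ca-data"
  kubectl config set clusters.demo-cluster.certificate-authority-data "${raw}" --set-raw-bytes
  kubectl config set clusters.demo-cluster-2.certificate-authority-data "$(echo -n "${raw}" | base64)"
  a=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "demo-cluster")].cluster.certificate-authority-data}')
  b=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "demo-cluster-2")].cluster.certificate-authority-data}')
  test "${a}" == "${b}"    # both paths should store the same encoded bytes
}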
841
vendor/k8s.io/kubernetes/test/cmd/legacy-script.sh
generated
vendored
Executable file
@ -0,0 +1,841 @@
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This contains util code for testing kubectl.

set -o errexit
set -o nounset
set -o pipefail

# Set locale to ensure english responses from kubectl commands
export LANG=C

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following has already been done by whatever sources this script
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"
source "${KUBE_ROOT}/test/cmd/apply.sh"
source "${KUBE_ROOT}/test/cmd/apps.sh"
source "${KUBE_ROOT}/test/cmd/authorization.sh"
source "${KUBE_ROOT}/test/cmd/batch.sh"
source "${KUBE_ROOT}/test/cmd/certificate.sh"
source "${KUBE_ROOT}/test/cmd/core.sh"
source "${KUBE_ROOT}/test/cmd/crd.sh"
source "${KUBE_ROOT}/test/cmd/create.sh"
source "${KUBE_ROOT}/test/cmd/diff.sh"
source "${KUBE_ROOT}/test/cmd/discovery.sh"
source "${KUBE_ROOT}/test/cmd/generic-resources.sh"
source "${KUBE_ROOT}/test/cmd/get.sh"
source "${KUBE_ROOT}/test/cmd/initializers.sh"
source "${KUBE_ROOT}/test/cmd/kubeconfig.sh"
source "${KUBE_ROOT}/test/cmd/node-management.sh"
source "${KUBE_ROOT}/test/cmd/old-print.sh"
source "${KUBE_ROOT}/test/cmd/plugins.sh"
source "${KUBE_ROOT}/test/cmd/proxy.sh"
source "${KUBE_ROOT}/test/cmd/rbac.sh"
source "${KUBE_ROOT}/test/cmd/request-timeout.sh"
source "${KUBE_ROOT}/test/cmd/run.sh"
source "${KUBE_ROOT}/test/cmd/save-config.sh"
source "${KUBE_ROOT}/test/cmd/storage.sh"
source "${KUBE_ROOT}/test/cmd/template-output.sh"
source "${KUBE_ROOT}/test/cmd/version.sh"

ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
SECURE_API_PORT=${SECURE_API_PORT:-6443}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.

IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="k8s.gcr.io/perl"
IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"

# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH

# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
selfsubjectaccessreviews="selfsubjectaccessreviews"
customresourcedefinitions="customresourcedefinitions"
daemonsets="daemonsets"
controllerrevisions="controllerrevisions"
job="jobs"

# include shell2junit library
sh2ju="${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
if [[ -f "${sh2ju}" ]]; then
source "${sh2ju}"
else
echo "failed to find third_party/forked/shell2junit/sh2ju.sh"
exit 1
fi

# record_command runs the command and records its output/error messages in junit format
# it expects the first argument to be the name of the command
# Example:
# record_command run_kubectl_tests
#
# WARNING: Variable changes in the command will NOT be effective after record_command returns.
# This is because the command runs in a subshell.
function record_command() {
set +o nounset
set +o errexit

local name="$1"
local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "Recording: ${name}"
echo "Running command: $@"
juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
if [[ $? -ne 0 ]]; then
echo "Error when running ${name}"
foundError="${foundError}""${name}"", "
fi

set -o nounset
set -o errexit
}

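# --- Illustrative sketch (not part of the vendored script): typical use of
# record_command and the subshell caveat documented above. run_example_tests
# is a hypothetical test function.
run_example_tests() {
  GLOBAL_MARKER="set-inside-test"          # lost when the subshell exits
  kubectl version "${kube_flags[@]}"
}
# juLog runs the named function in a subshell and writes a
# "test-cmd.run_example_tests" entry to ${KUBE_JUNIT_REPORT_DIR}; only
# foundError in the parent is updated when it fails:
# record_command run_example_tests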
# Stops the running kubectl proxy, if there is one.
function stop-proxy()
{
[[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
[[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
PROXY_PID=
PROXY_PORT=
PROXY_PORT_FILE=
}

# Starts "kubectl proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
stop-proxy

PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"

if [ $# -eq 0 ]; then
kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
else
kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
fi
PROXY_PID=$!
PROXY_PORT=

local attempts=0
while [[ -z ${PROXY_PORT} ]]; do
if (( ${attempts} > 9 )); then
kill "${PROXY_PID}"
kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
fi
sleep .5
kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
attempts=$((attempts+1))
done

kube::log::status "kubectl proxy running on port ${PROXY_PORT}"

# We try checking kubectl proxy 30 times with 1s delays to avoid occasional
# failures.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}

function cleanup()
{
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy

kube::etcd::cleanup
rm -rf "${KUBE_TEMP}"

local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "junit report dir:" ${junit_dir}

kube::log::status "Clean up complete"
}

# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
function check-curl-proxy-code()
{
local status
local -r address=$1
local -r desired=$2
local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
if [ "${status}" == "${desired}" ]; then
return 0
fi
echo "For address ${full_address}, got ${status} but wanted ${desired}"
return 1
}

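# --- Illustrative sketch (not part of the vendored script): how the two
# helpers above combine. start-proxy publishes PROXY_PORT, and
# check-curl-proxy-code asserts on the HTTP status curl sees for a path. The
# 404 path here is a hypothetical non-existent endpoint.
demo_proxy_check() {
  start-proxy                                   # random port, no --api-prefix
  check-curl-proxy-code /api/v1/namespaces 200
  check-curl-proxy-code /api/no-such-group 404
  stop-proxy
}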
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
preserve_err_file=${PRESERVE_ERR_FILE-false}
for count in {0..3}; do
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
if [ "$preserve_err_file" != true ] ; then
rm "${ERROR_FILE}"
fi
break
fi
done
}

# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
local i
for i in $(seq 1 10); do
kubeout=`kubectl get po -l $1 --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
if [[ $kubeout = $2 ]]; then
return
fi
echo Waiting for pods: $2, found $kubeout
sleep $i
done
kube::log::error_exit "Timeout waiting for pods with label $1"
}

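# --- Illustrative sketch (not part of the vendored script): kubectl-with-retry
# retries only on optimistic-concurrency conflicts ("the object has been
# modified"), sleeping 1s, 2s, 4s, 8s between attempts. Deployment, container
# and label names below are hypothetical.
demo_retry_and_wait() {
  # retry a rolling image update that can race with the controller
  kubectl-with-retry set image deployment/example-deploy example-container="${IMAGE_PAUSE_V2}" "${kube_flags[@]}"
  # block until exactly these two pods exist (names concatenated, sorted)
  wait-for-pods-with-label "app=example" "example-0example-1"
}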
# Code to be run before running the tests.
setup() {
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"

kube::etcd::start

# Find a standard sed instance for use with edit scripts
kube::util::ensure-gnu-sed

kube::log::status "Building kubectl"
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"

# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"

# TODO: we need to note down the current default namespace and set back to this
# namespace after the tests are done.
kubectl config view
CONTEXT="test"
kubectl config set-context "${CONTEXT}"
kubectl config use-context "${CONTEXT}"

kube::log::status "Setup complete"
}

# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
foundError=""

if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version

# Generate a random namespace name, based on the current time (to make
# debugging slightly easier) and a random number. Don't use `date +%N`
# because that doesn't work on OSX.
create_and_use_new_namespace() {
local ns_name
ns_name="namespace-$(date +%s)-${RANDOM}"
kube::log::status "Creating namespace ${ns_name}"
kubectl create namespace "${ns_name}"
kubectl config set-context "${CONTEXT}" --namespace="${ns_name}"
}

kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)

# token defined in hack/testdata/auth-tokens.csv
kube_flags_with_token=(
-s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
)

if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
kube_flags_with_token+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi

id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
template_labels=".spec.template.metadata.labels.name"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
pdb_max_unavailable=".spec.maxUnavailable"
generation_field=".metadata.generation"
template_generation_field=".spec.templateGeneration"
container_len="(len .spec.template.spec.containers)"
image_field0="(index .spec.template.spec.containers 0).image"
image_field1="(index .spec.template.spec.containers 1).image"

# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
if [[ ! $(echo "${output_message}" | grep "default") ]]; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi

# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi

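# --- Illustrative sketch (not part of the vendored script): the *_field
# variables defined earlier in runTests are Go-template paths that
# kube::test::get_object_assert splices into `kubectl get -o go-template`.
# Roughly equivalent by hand, for a hypothetical pod named "example-pod":
#   kubectl get pods example-pod -o go-template="{{${id_field}}}"      # -> example-pod
#   kubectl get pods example-pod -o go-template="{{${image_field}}}"   # -> first container image
#   kube::test::get_object_assert pods/example-pod "{{${id_field}}}" 'example-pod'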
#########################
|
||||
# Kubectl version #
|
||||
#########################
|
||||
|
||||
record_command run_kubectl_version_tests
|
||||
|
||||
#######################
|
||||
# kubectl config set #
|
||||
#######################
|
||||
|
||||
record_command run_kubectl_config_set_tests
|
||||
|
||||
#######################
|
||||
# kubectl local proxy #
|
||||
#######################
|
||||
|
||||
record_command run_kubectl_local_proxy_tests
|
||||
|
||||
#########################
|
||||
# RESTMapper evaluation #
|
||||
#########################
|
||||
|
||||
record_command run_RESTMapper_evaluation_tests
|
||||
|
||||
# find all resources
|
||||
kubectl "${kube_flags[@]}" api-resources
|
||||
# find all namespaced resources that support list by name and get them
|
||||
kubectl "${kube_flags[@]}" api-resources --verbs=list --namespaced -o name | xargs -n 1 kubectl "${kube_flags[@]}" get -o name
|
||||
|
||||
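# --- Illustrative sketch (not part of the vendored script): the api-resources
# sweep above can be narrowed the same way by hand with other standard flags,
# e.g. to namespaced resources supporting "delete", or to a single API group:
#   kubectl "${kube_flags[@]}" api-resources --verbs=delete --namespaced -o name
#   kubectl "${kube_flags[@]}" api-resources --api-group=apps -o wide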
################
|
||||
# Cluster Role #
|
||||
################
|
||||
|
||||
if kube::test::if_supports_resource "${clusterroles}" ; then
|
||||
record_command run_clusterroles_tests
|
||||
fi
|
||||
|
||||
########
|
||||
# Role #
|
||||
########
|
||||
if kube::test::if_supports_resource "${roles}" ; then
|
||||
record_command run_role_tests
|
||||
fi
|
||||
|
||||
#########################
|
||||
# Assert short name #
|
||||
#########################
|
||||
|
||||
record_command run_assert_short_name_tests
|
||||
|
||||
#########################
|
||||
# Assert categories #
|
||||
#########################
|
||||
|
||||
## test if a category is exported during discovery
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_assert_categories_tests
|
||||
fi
|
||||
|
||||
###########################
|
||||
# POD creation / deletion #
|
||||
###########################
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_pod_tests
|
||||
fi
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_save_config_tests
|
||||
fi
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_kubectl_create_error_tests
|
||||
fi
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_kubectl_apply_tests
|
||||
record_command run_kubectl_run_tests
|
||||
record_command run_kubectl_create_filter_tests
|
||||
fi
|
||||
|
||||
if kube::test::if_supports_resource "${deployments}" ; then
|
||||
record_command run_kubectl_apply_deployments_tests
|
||||
fi
|
||||
|
||||
################
|
||||
# Kubectl diff #
|
||||
################
|
||||
record_command run_kubectl_diff_tests
|
||||
|
||||
###############
|
||||
# Kubectl get #
|
||||
###############
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_kubectl_get_tests
|
||||
record_command run_kubectl_old_print_tests
|
||||
fi
|
||||
|
||||
|
||||
######################
|
||||
# Create #
|
||||
######################
|
||||
if kube::test::if_supports_resource "${secrets}" ; then
|
||||
record_command run_create_secret_tests
|
||||
fi
|
||||
|
||||
##################
|
||||
# Global timeout #
|
||||
##################
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_kubectl_request_timeout_tests
|
||||
fi
|
||||
|
||||
#####################################
|
||||
# CustomResourceDefinitions #
|
||||
#####################################
|
||||
|
||||
# customresourcedefinitions cleanup after themselves.
|
||||
if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
|
||||
record_command run_crd_tests
|
||||
fi
|
||||
|
||||
#################
|
||||
# Run cmd w img #
|
||||
#################
|
||||
|
||||
if kube::test::if_supports_resource "${deployments}" ; then
|
||||
record_command run_cmd_with_img_tests
|
||||
fi
|
||||
|
||||
|
||||
#####################################
|
||||
# Recursive Resources via directory #
|
||||
#####################################
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_recursive_resources_tests
|
||||
fi
|
||||
|
||||
|
||||
##############
|
||||
# Namespaces #
|
||||
##############
|
||||
if kube::test::if_supports_resource "${namespaces}" ; then
|
||||
record_command run_namespace_tests
|
||||
fi
|
||||
|
||||
|
||||
###########
|
||||
# Secrets #
|
||||
###########
|
||||
if kube::test::if_supports_resource "${namespaces}" ; then
|
||||
if kube::test::if_supports_resource "${secrets}" ; then
|
||||
record_command run_secrets_test
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
######################
|
||||
# ConfigMap #
|
||||
######################
|
||||
|
||||
if kube::test::if_supports_resource "${namespaces}"; then
|
||||
if kube::test::if_supports_resource "${configmaps}" ; then
|
||||
record_command run_configmap_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
####################
|
||||
# Client Config #
|
||||
####################
|
||||
|
||||
record_command run_client_config_tests
|
||||
|
||||
####################
|
||||
# Service Accounts #
|
||||
####################
|
||||
|
||||
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
|
||||
record_command run_service_accounts_tests
|
||||
fi
|
||||
|
||||
####################
|
||||
# Job #
|
||||
####################
|
||||
|
||||
if kube::test::if_supports_resource "${job}" ; then
|
||||
record_command run_job_tests
|
||||
record_command run_create_job_tests
|
||||
fi
|
||||
|
||||
#################
|
||||
# Pod templates #
|
||||
#################
|
||||
|
||||
if kube::test::if_supports_resource "${podtemplates}" ; then
|
||||
record_command run_pod_templates_tests
|
||||
fi
|
||||
|
||||
############
|
||||
# Services #
|
||||
############
|
||||
|
||||
if kube::test::if_supports_resource "${services}" ; then
|
||||
record_command run_service_tests
|
||||
fi
|
||||
|
||||
##################
|
||||
# DaemonSets #
|
||||
##################
|
||||
|
||||
if kube::test::if_supports_resource "${daemonsets}" ; then
|
||||
record_command run_daemonset_tests
|
||||
if kube::test::if_supports_resource "${controllerrevisions}"; then
|
||||
record_command run_daemonset_history_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
###########################
|
||||
# Replication controllers #
|
||||
###########################
|
||||
|
||||
if kube::test::if_supports_resource "${namespaces}" ; then
|
||||
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
|
||||
record_command run_rc_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
######################
|
||||
# Deployments #
|
||||
######################
|
||||
|
||||
if kube::test::if_supports_resource "${deployments}" ; then
|
||||
record_command run_deployment_tests
|
||||
fi
|
||||
|
||||
######################
|
||||
# Replica Sets #
|
||||
######################
|
||||
|
||||
if kube::test::if_supports_resource "${replicasets}" ; then
|
||||
record_command run_rs_tests
|
||||
fi
|
||||
|
||||
#################
|
||||
# Stateful Sets #
|
||||
#################
|
||||
|
||||
if kube::test::if_supports_resource "${statefulsets}" ; then
|
||||
record_command run_stateful_set_tests
|
||||
if kube::test::if_supports_resource "${controllerrevisions}"; then
|
||||
record_command run_statefulset_history_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
######################
|
||||
# Lists #
|
||||
######################
|
||||
|
||||
if kube::test::if_supports_resource "${services}" ; then
|
||||
if kube::test::if_supports_resource "${deployments}" ; then
|
||||
record_command run_lists_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
######################
|
||||
# Multiple Resources #
|
||||
######################
|
||||
if kube::test::if_supports_resource "${services}" ; then
|
||||
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
|
||||
record_command run_multi_resources_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
######################
|
||||
# Persistent Volumes #
|
||||
######################
|
||||
|
||||
if kube::test::if_supports_resource "${persistentvolumes}" ; then
|
||||
record_command run_persistent_volumes_tests
|
||||
fi
|
||||
|
||||
############################
|
||||
# Persistent Volume Claims #
|
||||
############################
|
||||
|
||||
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
|
||||
record_command run_persistent_volume_claims_tests
|
||||
fi
|
||||
|
||||
############################
|
||||
# Storage Classes #
|
||||
############################
|
||||
|
||||
if kube::test::if_supports_resource "${storageclass}" ; then
|
||||
record_command run_storage_class_tests
|
||||
fi
|
||||
|
||||
#########
|
||||
# Nodes #
|
||||
#########
|
||||
|
||||
if kube::test::if_supports_resource "${nodes}" ; then
|
||||
record_command run_nodes_tests
|
||||
fi
|
||||
|
||||
|
||||
########################
|
||||
# authorization.k8s.io #
|
||||
########################
|
||||
|
||||
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
|
||||
record_command run_authorization_tests
|
||||
fi
|
||||
|
||||
# kubectl auth can-i
|
||||
# kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
|
||||
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
|
||||
output_message=$(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "yes"
|
||||
|
||||
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "yes"
|
||||
|
||||
output_message=$(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type"
|
||||
|
||||
output_message=$(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "yes"
|
||||
|
||||
output_message=$(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" "subresource can not be used with NonResourceURL"
|
||||
|
||||
output_message=$(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_empty_string "${output_message}"
|
||||
|
||||
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}"; echo $?)
|
||||
kube::test::if_has_string "${output_message}" '0'
|
||||
|
||||
output_message=$(kubectl auth can-i get pods --subresource=log --quiet 2>&1 "${kube_flags[@]}"; echo $?)
|
||||
kube::test::if_has_string "${output_message}" '0'
|
||||
fi
|
||||
|
||||
# kubectl auth reconcile
|
||||
if kube::test::if_supports_resource "${clusterroles}" ; then
|
||||
kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml
|
||||
kube::test::get_object_assert 'rolebindings -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-RB:'
|
||||
kube::test::get_object_assert 'roles -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-R:'
|
||||
kube::test::get_object_assert 'clusterrolebindings -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CRB:'
|
||||
kube::test::get_object_assert 'clusterroles -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CR:'
|
||||
|
||||
failure_message=$(! kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-v1beta1.yaml 2>&1 )
|
||||
kube::test::if_has_string "${failure_message}" 'only rbac.authorization.k8s.io/v1 is supported'
|
||||
|
||||
kubectl delete "${kube_flags[@]}" rolebindings,role,clusterroles,clusterrolebindings -n some-other-random -l test-cmd=auth
|
||||
fi
|
||||
|
||||
#####################
|
||||
# Retrieve multiple #
|
||||
#####################
|
||||
|
||||
if kube::test::if_supports_resource "${nodes}" ; then
|
||||
if kube::test::if_supports_resource "${services}" ; then
|
||||
record_command run_retrieve_multiple_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
#####################
|
||||
# Resource aliasing #
|
||||
#####################
|
||||
|
||||
if kube::test::if_supports_resource "${services}" ; then
|
||||
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
|
||||
record_command run_resource_aliasing_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
###########
|
||||
# Explain #
|
||||
###########
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_kubectl_explain_tests
|
||||
fi
|
||||
|
||||
|
||||
###########
|
||||
# Swagger #
|
||||
###########
|
||||
|
||||
record_command run_swagger_tests
|
||||
|
||||
#####################
|
||||
# Kubectl --sort-by #
|
||||
#####################
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_kubectl_sort_by_tests
|
||||
fi
|
||||
|
||||
############################
|
||||
# Kubectl --all-namespaces #
|
||||
############################
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
if kube::test::if_supports_resource "${nodes}" ; then
|
||||
record_command run_kubectl_all_namespace_tests
|
||||
fi
|
||||
fi
|
||||
|
||||
######################
|
||||
# kubectl --template #
|
||||
######################
|
||||
|
||||
if kube::test::if_supports_resource "${pods}" ; then
|
||||
record_command run_template_output_tests
|
||||
fi
|
||||
|
||||
################
|
||||
# Certificates #
|
||||
################
|
||||
|
||||
if kube::test::if_supports_resource "${csr}" ; then
|
||||
record_command run_certificates_tests
|
||||
fi
|
||||
|
||||
######################
|
||||
# Cluster Management #
|
||||
######################
|
||||
if kube::test::if_supports_resource "${nodes}" ; then
|
||||
record_command run_cluster_management_tests
|
||||
fi
|
||||
|
||||
###########
|
||||
# Plugins #
|
||||
###########
|
||||
|
||||
record_command run_plugins_tests
|
||||
|
||||
#################
|
||||
# Impersonation #
|
||||
#################
|
||||
record_command run_impersonation_tests
|
||||
|
||||
kube::test::clear_all
|
||||
|
||||
if [[ -n "${foundError}" ]]; then
|
||||
echo "FAILED TESTS: ""${foundError}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
149
vendor/k8s.io/kubernetes/test/cmd/node-management.sh
generated
vendored
Executable file
@ -0,0 +1,149 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_cluster_management_tests() {
set -o nounset
set -o errexit

kube::log::status "Testing cluster-management commands"

kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'

# create test pods we can work with
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-1",
"labels": {
"e": "f"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__

kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-2",
"labels": {
"c": "d"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__

# taint/untaint
# Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
# taint can add a taint
kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=foo:PreferNoSchedule"
# taint can remove a taint
kubectl taint node 127.0.0.1 dedicated-
# Post-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output

### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl cordon "127.0.0.1" --dry-run
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

### kubectl drain update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
kubectl drain "127.0.0.1" --dry-run
# Post-condition: node still exists, node is still schedulable
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

### kubectl drain with --pod-selector only evicts pods that match the given selector
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# Pre-condition: test-pod-1 and test-pod-2 exist
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
# only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
# delete pod no longer in use
kubectl delete pod/test-pod-2
# Post-condition: node is schedulable
kubectl uncordon "127.0.0.1"
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

### kubectl uncordon update with --dry-run is a no-op
# Pre-condition: node is already schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
response=$(kubectl uncordon "127.0.0.1" --dry-run)
kube::test::if_has_string "${response}" 'already uncordoned'
# Post-condition: node is still schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

### kubectl drain command fails when both --selector and a node argument are given
# Pre-condition: node exists and contains label test=label
kubectl label node "127.0.0.1" "test=label"
kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
kube::test::if_has_string "${response}" 'cannot specify both a node name'

### kubectl cordon command fails when no arguments are passed
# Pre-condition: node exists
response=$(! kubectl cordon 2>&1)
kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'

### kubectl cordon selects no nodes with an empty --selector=
# Pre-condition: node "127.0.0.1" is uncordoned
kubectl uncordon "127.0.0.1"
response=$(! kubectl cordon --selector= 2>&1)
kube::test::if_has_string "${response}" 'must provide one or more resources'
# test=label matches our node
response=$(kubectl cordon --selector test=label)
kube::test::if_has_string "${response}" 'node/127.0.0.1 cordoned'
# invalid=label does not match any nodes
response=$(kubectl cordon --selector invalid=label)
kube::test::if_has_not_string "${response}" 'cordoned'
# Post-condition: node "127.0.0.1" is cordoned
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'

set +o nounset
set +o errexit
}
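# --- Illustrative sketch (not part of the vendored script): a minimal
# cordon/drain/uncordon cycle against the same single-node fixture, showing
# that drain implies cordon. The pod label "app in (example)" is hypothetical.
demo_drain_cycle() {
  kubectl cordon 127.0.0.1                                   # mark the node unschedulable
  kubectl drain 127.0.0.1 --pod-selector 'app in (example)'  # evict only matching pods
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  kubectl uncordon 127.0.0.1                                 # make it schedulable again
}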
140
vendor/k8s.io/kubernetes/test/cmd/old-print.sh
generated
vendored
Executable file
@ -0,0 +1,140 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_kubectl_old_print_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl get --server-print=false"
|
||||
### Test retrieval of all types in discovery
|
||||
# Pre-condition: no resources exist
|
||||
output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]}")
|
||||
# Post-condition: Expect text indicating no resources were found
|
||||
kube::test::if_has_string "${output_message}" 'No resources found.'
|
||||
|
||||
### Test retrieval of pods against server-side printing
|
||||
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
|
||||
# Post-condition: valid-pod POD is created
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
|
||||
expected_output=$(kubectl get pod "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get pod --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
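# --- Illustrative note (not part of the vendored script): awk 'NF{NF--};1'
# used above drops the last whitespace-separated column (AGE) from every
# non-empty line, so client-side and server-side table output can be compared
# verbatim:
#   printf 'NAME READY STATUS AGE\nvalid-pod 1/1 Running 10s\n' | awk 'NF{NF--};1'
#   # -> "NAME READY STATUS" and "valid-pod 1/1 Running"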
# Test printing objects with --use-openapi-print-columns
|
||||
actual_output=$(kubectl get namespaces --use-openapi-print-columns --v=7 "${kube_flags[@]}" 2>&1)
|
||||
# it should request full objects (not server-side printing)
|
||||
kube::test::if_has_not_string "${actual_output}" 'application/json;as=Table'
|
||||
kube::test::if_has_string "${actual_output}" 'application/json'
|
||||
|
||||
### Test retrieval of daemonsets against server-side printing
|
||||
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
|
||||
# Post-condition: daemonset is created
|
||||
kube::test::get_object_assert ds "{{range.items}}{{$id_field}}:{{end}}" 'bind:'
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
|
||||
expected_output=$(kubectl get ds "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get ds --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
### Test retrieval of replicationcontrollers against server-side printing
|
||||
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
|
||||
# Post-condition: frontend replication controller is created
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
|
||||
expected_output=$(kubectl get rc "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get rc --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
### Test retrieval of replicasets against server-side printing
|
||||
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
|
||||
# Post-condition: frontend replica set is created
|
||||
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
|
||||
expected_output=$(kubectl get rs "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get rs --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
### Test retrieval of jobs against server-side printing
|
||||
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
|
||||
# Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
|
||||
expected_output=$(kubectl get jobs/pi "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get jobs/pi --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
### Test retrieval of clusterroles against server-side printing
|
||||
kubectl create "${kube_flags[@]}" clusterrole sample-role --verb=* --resource=pods
|
||||
# Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert clusterrole/sample-role "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
|
||||
expected_output=$(kubectl get clusterroles/sample-role "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get clusterroles/sample-role --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
### Test retrieval of crds against server-side printing
|
||||
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
|
||||
{
|
||||
"kind": "CustomResourceDefinition",
|
||||
"apiVersion": "apiextensions.k8s.io/v1beta1",
|
||||
"metadata": {
|
||||
"name": "foos.company.com"
|
||||
},
|
||||
"spec": {
|
||||
"group": "company.com",
|
||||
"version": "v1",
|
||||
"scope": "Namespaced",
|
||||
"names": {
|
||||
"plural": "foos",
|
||||
"kind": "Foo"
|
||||
}
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
|
||||
# Post-Condition: assertion object exists
|
||||
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'foos.company.com:'
|
||||
|
||||
# Test that we can list this new CustomResource
|
||||
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Compare "old" output with experimental output and ensure both are the same
|
||||
expected_output=$(kubectl get foos "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
actual_output=$(kubectl get foos --server-print=false "${kube_flags[@]}" | awk 'NF{NF--};1')
|
||||
kube::test::if_has_string "${actual_output}" "${expected_output}"
|
||||
|
||||
# teardown
|
||||
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
|
||||
kubectl delete clusterroles/sample-role "${kube_flags_with_token[@]}"
|
||||
kubectl delete jobs pi "${kube_flags[@]}"
|
||||
kubectl delete rs frontend "${kube_flags[@]}"
|
||||
kubectl delete rc frontend "${kube_flags[@]}"
|
||||
kubectl delete ds bind "${kube_flags[@]}"
|
||||
kubectl delete pod valid-pod "${kube_flags[@]}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
54
vendor/k8s.io/kubernetes/test/cmd/plugins.sh
generated
vendored
Executable file
@ -0,0 +1,54 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_plugins_tests() {
set -o nounset
set -o errexit

kube::log::status "Testing kubectl plugins"

# test plugins that overwrite existing kubectl commands
output_message=$(! PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins/version" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-version overwrites existing command: "kubectl version"'

# test plugins that overwrite similarly-named plugins
output_message=$(! PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins:test/fixtures/pkg/kubectl/plugins/foo" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'test/fixtures/pkg/kubectl/plugins/foo/kubectl-foo is overshadowed by a similarly named plugin'

# test plugins with no warnings
output_message=$(PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'plugins are available'

# no plugins
output_message=$(! PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins/empty" kubectl plugin list 2>&1)
kube::test::if_has_string "${output_message}" 'unable to find any kubectl plugins in your PATH'

# attempt to run a plugin in the user's PATH
output_message=$(PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins" kubectl foo)
kube::test::if_has_string "${output_message}" 'plugin foo'

# ensure that a kubectl command supersedes a plugin that overshadows it
output_message=$(PATH=${PATH}:"test/fixtures/pkg/kubectl/plugins/version" kubectl version)
kube::test::if_has_string "${output_message}" 'Client Version'
kube::test::if_has_not_string "${output_message}" 'overshadows an existing plugin'

set +o nounset
set +o errexit
}
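# --- Illustrative sketch (not part of the vendored script): the fixtures used
# above are simply executables named kubectl-<name> on PATH. A hypothetical
# throwaway plugin can be produced and exercised the same way:
demo_make_plugin() {
  local plugdir
  plugdir=$(mktemp -d)
  printf '#!/usr/bin/env bash\necho "plugin hello"\n' > "${plugdir}/kubectl-hello"
  chmod +x "${plugdir}/kubectl-hello"
  PATH="${PATH}:${plugdir}" kubectl hello          # dispatches to kubectl-hello
  PATH="${PATH}:${plugdir}" kubectl plugin list    # lists kubectl-hello
}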
56
vendor/k8s.io/kubernetes/test/cmd/proxy.sh
generated
vendored
Executable file
@ -0,0 +1,56 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_kubectl_local_proxy_tests() {
set -o nounset
set -o errexit

kube::log::status "Testing kubectl local proxy"

start-proxy
check-curl-proxy-code /api/kubernetes 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy

# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy

# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/api/kubernetes 404
check-curl-proxy-code /custom/api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy

set +o nounset
set +o errexit
}
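# --- Illustrative sketch (not part of the vendored script): what
# check-curl-proxy-code does under the hood, spelled out with plain curl for a
# proxy already started by start-proxy (PROXY_PORT set by that helper):
demo_curl_proxy() {
  local code
  code=$(curl -w "%{http_code}" --silent --output /dev/null "127.0.0.1:${PROXY_PORT}/api/v1/namespaces")
  [ "${code}" == "200" ] || echo "unexpected status ${code}"
}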
159
vendor/k8s.io/kubernetes/test/cmd/rbac.sh
generated
vendored
Executable file
@ -0,0 +1,159 @@
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_clusterroles_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing clusterroles"
|
||||
|
||||
# make sure the server was properly bootstrapped with clusterroles and bindings
|
||||
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
|
||||
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
|
||||
|
||||
# test `kubectl create clusterrole`
|
||||
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
|
||||
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
|
||||
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
|
||||
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
|
||||
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
|
||||
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
|
||||
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
|
||||
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
|
||||
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
|
||||
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
|
||||
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
|
||||
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
|
||||
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
|
||||
kubectl create "${kube_flags[@]}" clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/*
|
||||
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
|
||||
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
|
||||
kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2"
|
||||
kube::test::get_object_assert clusterrole/aggregation-reader "{{$id_field}}" 'aggregation-reader'
|
||||
|
||||
# test `kubectl create clusterrolebinding`
# test `kubectl set subject clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
kube::test::get_object_assert clusterrolebinding/multi-users "{{range.subjects}}{{.name}}:{{end}}" 'user-1:user-2:'

kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-group --group=foo
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding multi-groups --clusterrole=admin --group=group-1 --group=group-2
kube::test::get_object_assert clusterrolebinding/multi-groups "{{range.subjects}}{{.name}}:{{end}}" 'group-1:group-2:'

kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-sa --serviceaccount=otherfoo:foo
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'

# test `kubectl set subject clusterrolebinding --all`
kubectl set subject "${kube_flags[@]}" clusterrolebinding --all --user=test-all-user
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:test-all-user:'
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:'

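# Illustrative sketch (not part of the upstream test): the --user/--group/
# --serviceaccount flags above map to different subject kinds, e.g. for super-sa:
#   subjects:
#   - kind: ServiceAccount
#     name: sa-name
#     namespace: otherns
# which is why only the ServiceAccount cases also assert a .namespace field.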
# test `kubectl create rolebinding`
# test `kubectl set subject rolebinding`
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl set subject "${kube_flags[@]}" rolebinding admin --user=foo
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:'

kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group
kube::test::get_object_assert rolebinding/localrole "{{.roleRef.kind}}" 'Role'
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" rolebinding localrole --group=foo
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'

kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" rolebinding sarole --serviceaccount=otherfoo:foo
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'

# test `kubectl set subject rolebinding --all`
kubectl set subject "${kube_flags[@]}" rolebinding --all --user=test-all-user
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:test-all-user:'
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:test-all-user:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:test-all-user:'

set +o nounset
set +o errexit
}

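# Illustrative sketch (not part of the upstream test): a RoleBinding's roleRef
# may point either at a ClusterRole (--clusterrole=...) or at a Role in the same
# namespace (--role=...), which is what the roleRef.kind assertions above
# distinguish; a quick manual check would be e.g.:
#   kubectl get rolebinding admin localrole \
#     -o jsonpath='{range .items[*]}{.metadata.name}={.roleRef.kind} {end}'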
run_role_tests() {
set -o nounset
set -o errexit

create_and_use_new_namespace
kube::log::status "Testing role"

# Create Role from command (only resource)
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-pod-admin --verb=* --resource=invalid-resource 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"invalid-resource\""
# Create Role from command (resource + group)
kubectl create "${kube_flags[@]}" role group-reader --verb=get,list --resource=deployments.extensions
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'deployments:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=deployments.invalid-group 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"deployments\" in group \"invalid-group\""
# Create Role from command (resource / subresource)
kubectl create "${kube_flags[@]}" role subresource-reader --verb=get,list --resource=pods/status
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
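# Illustrative note (not part of the upstream test): the --resource values used
# in these cases follow the RESOURCE[.GROUP][/SUBRESOURCE] form, e.g.
#   --resource=pods                          # core group
#   --resource=deployments.extensions        # named API group
#   --resource=pods/status                   # subresource
#   --resource=replicasets.extensions/scale  # group + subresource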
# Create Role from command (resource + group / subresource)
kubectl create "${kube_flags[@]}" role group-subresource-reader --verb=get,list --resource=replicasets.extensions/scale
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'replicasets/scale:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=rs.invalid-group/scale 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"rs\" in group \"invalid-group\""
# Create Role from command (resource + resourcename)
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# Create Role from command (multi-resources)
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods/status,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'

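# Illustrative note (not part of the upstream test): with
# --resource=pods/status,deployments.extensions kubectl ends up generating one
# rule per API group (core for pods/status, extensions for deployments), which
# is why the verbs template above iterates two rules and prints 'get:list:get:list:'.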
set +o nounset
set +o errexit
}

57
vendor/k8s.io/kubernetes/test/cmd/request-timeout.sh
generated
vendored
Executable file
57
vendor/k8s.io/kubernetes/test/cmd/request-timeout.sh
generated
vendored
Executable file
@ -0,0 +1,57 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_kubectl_request_timeout_tests() {
set -o nounset
set -o errexit

kube::log::status "Testing kubectl request timeout"
### Test global request timeout option
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

## check --request-timeout on 'get pod'
output_message=$(kubectl get pod valid-pod --request-timeout=1)
kube::test::if_has_string "${output_message}" 'valid-pod'

## check --request-timeout on 'get pod' with --watch
output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'

## check --request-timeout value with no time unit
output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
kube::test::if_has_string "${output_message}" 'valid-pod'

## check --request-timeout value with invalid time unit
output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
kube::test::if_has_string "${output_message}" 'Invalid timeout value'
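# Illustrative note (not part of the upstream test): --request-timeout accepts
# either a bare number (interpreted as seconds) or a Go-style duration suffix,
# so the cases above cover the interesting shapes, e.g.:
#   kubectl get pod valid-pod --request-timeout=1     # plain seconds
#   kubectl get pod valid-pod --request-timeout=30s   # explicit unit
#   kubectl get pod valid-pod --request-timeout=1p    # rejected as invalid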

# cleanup
kubectl delete pods valid-pod "${kube_flags[@]}"

set +o nounset
set +o errexit
}

104
vendor/k8s.io/kubernetes/test/cmd/run.sh
generated
vendored
Executable file
104
vendor/k8s.io/kubernetes/test/cmd/run.sh
generated
vendored
Executable file
@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_kubectl_run_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing kubectl run"
|
||||
## kubectl run should create deployments, jobs or cronjob
|
||||
# Pre-Condition: no Job exists
|
||||
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
|
||||
# Post-Condition: Job "pi" is created
|
||||
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
|
||||
# Describe command (resource only) should print detailed information
|
||||
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
|
||||
# Clean up
|
||||
kubectl delete jobs pi "${kube_flags[@]}"
|
||||
# Post-condition: no pods exist.
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
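# Illustrative note (not part of the upstream test): the --generator values used
# in this file map, roughly, to different workload kinds:
#   --generator=job/v1              -> Job
#   --generator=cronjob/v1beta1     -> CronJob
#   --generator=deployment/apps.v1  -> apps/v1 Deployment
#   no generator (default --restart=Always) -> Deployment, as in the case below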
# Pre-Condition: no Deployment exists
|
||||
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl run nginx-extensions "--image=$IMAGE_NGINX" "${kube_flags[@]}"
|
||||
# Post-Condition: Deployment "nginx-extensions" is created
|
||||
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
|
||||
# the new generator was used, so the new default revisionHistoryLimit (10) applies
|
||||
output_message=$(kubectl get deployment.apps/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
|
||||
kube::test::if_has_string "${output_message}" '10'
|
||||
# Clean up
|
||||
kubectl delete deployment nginx-extensions "${kube_flags[@]}"
|
||||
# Command
|
||||
kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1 "${kube_flags[@]}"
|
||||
# Post-Condition: Deployment "nginx-apps" is created
|
||||
kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
|
||||
# and the new generator was used, i.e. the new defaults are applied
|
||||
output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
|
||||
kube::test::if_has_string "${output_message}" '10'
|
||||
# Clean up
|
||||
kubectl delete deployment nginx-apps "${kube_flags[@]}"
|
||||
|
||||
# Pre-Condition: no CronJob exists
|
||||
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
|
||||
# Post-Condition: CronJob "pi" is created
|
||||
kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
|
||||
|
||||
# Pre-condition: cronjob has perl image, not custom image
|
||||
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
|
||||
kube::test::if_has_not_string "${output_message}" "custom-image"
|
||||
kube::test::if_has_string "${output_message}" "${IMAGE_PERL}"
|
||||
# Set cronjob image
|
||||
kubectl set image cronjob/pi '*=custom-image'
|
||||
# Post-condition: cronjob has custom image, not perl image
|
||||
output_message=$(kubectl get cronjob/pi -o jsonpath='{..image}')
|
||||
kube::test::if_has_string "${output_message}" "custom-image"
|
||||
kube::test::if_has_not_string "${output_message}" "${IMAGE_PERL}"
|
||||
|
||||
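# Illustrative note (not part of the upstream test): in `kubectl set image`,
# the '*=custom-image' form above updates every container in the pod template,
# while naming a container updates just that one, e.g.:
#   kubectl set image cronjob/pi '*=custom-image'   # all containers
#   kubectl set image cronjob/pi pi=custom-image    # only the container named "pi"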
# Clean up
|
||||
kubectl delete cronjobs pi "${kube_flags[@]}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_cmd_with_img_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing cmd with image"
|
||||
|
||||
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
|
||||
output_message=$(kubectl run test1 --image=validname)
|
||||
kube::test::if_has_string "${output_message}" 'deployment.apps/test1 created'
|
||||
kubectl delete deployments test1
|
||||
# test invalid image name
|
||||
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
|
||||
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
105
vendor/k8s.io/kubernetes/test/cmd/save-config.sh
generated
vendored
Executable file
105
vendor/k8s.io/kubernetes/test/cmd/save-config.sh
generated
vendored
Executable file
@ -0,0 +1,105 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# Runs tests for kubectl --save-config.
|
||||
run_save_config_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing kubectl --save-config"
|
||||
## Configuration annotations should be set when --save-config is enabled
|
||||
## 1. kubectl create --save-config should generate configuration annotation
|
||||
# Pre-Condition: no POD exists
|
||||
create_and_use_new_namespace
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command: create a pod "test-pod"
|
||||
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
|
||||
# Post-Condition: pod "test-pod" has configuration annotation
|
||||
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Clean up
|
||||
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
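# Illustrative note (not part of the upstream test): --save-config records the
# object's configuration in the kubectl.kubernetes.io/last-applied-configuration
# annotation (the same one `kubectl apply` maintains); a more targeted check
# than the grep above could be e.g.:
#   kubectl get pod test-pod \
#     -o jsonpath='{.metadata.annotations.kubectl\.kubernetes\.io/last-applied-configuration}'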
## 2. kubectl edit --save-config should generate configuration annotation
|
||||
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
|
||||
create_and_use_new_namespace
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Command: edit the pod "test-pod"
|
||||
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
|
||||
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
|
||||
chmod +x "${temp_editor}"
|
||||
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
|
||||
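# Illustrative note (not part of the upstream test): the temp editor above makes
# `kubectl edit` non-interactive by pointing EDITOR at a script that rewrites
# the file in place; kubectl also honours KUBE_EDITOR, so an equivalent
# one-liner would be roughly:
#   KUBE_EDITOR="sed -i s/test-pod-label/test-pod-label-edited/g" kubectl edit pod test-pod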
# Post-Condition: pod "test-pod" has configuration annotation
|
||||
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Clean up
|
||||
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
## 3. kubectl replace --save-config should generate configuration annotation
|
||||
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
|
||||
create_and_use_new_namespace
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Command: replace the pod "test-pod"
|
||||
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
|
||||
# Post-Condition: pod "test-pod" has configuration annotation
|
||||
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Clean up
|
||||
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
|
||||
## 4. kubectl run --save-config should generate configuration annotation
|
||||
# Pre-Condition: no RC exists
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command: create the rc "nginx" with image nginx
|
||||
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
|
||||
# Post-Condition: rc "nginx" has configuration annotation
|
||||
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
## 5. kubectl expose --save-config should generate configuration annotation
|
||||
# Pre-Condition: no service exists
|
||||
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command: expose the rc "nginx"
|
||||
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
|
||||
# Post-Condition: service "nginx" has configuration annotation
|
||||
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Clean up
|
||||
kubectl delete rc,svc nginx
|
||||
## 6. kubectl autoscale --save-config should generate configuration annotation
|
||||
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
|
||||
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
|
||||
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Command: autoscale rc "frontend"
|
||||
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
|
||||
# Post-Condition: hpa "frontend" has configuration annotation
|
||||
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
|
||||
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
|
||||
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
|
||||
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
|
||||
# tests kubectl group prefix matching
|
||||
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
|
||||
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
|
||||
# Clean up
|
||||
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
|
||||
kubectl delete hpa frontend "${kube_flags[@]}"
|
||||
kubectl delete rc frontend "${kube_flags[@]}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
110
vendor/k8s.io/kubernetes/test/cmd/storage.sh
generated
vendored
Executable file
110
vendor/k8s.io/kubernetes/test/cmd/storage.sh
generated
vendored
Executable file
@ -0,0 +1,110 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_persistent_volumes_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing persistent volumes"
|
||||
|
||||
### Create and delete persistent volume examples
|
||||
# Pre-condition: no persistent volumes currently exist
|
||||
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
|
||||
kubectl delete pv pv0001 "${kube_flags[@]}"
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
|
||||
kubectl delete pv pv0002 "${kube_flags[@]}"
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
|
||||
kubectl delete pv pv0003 "${kube_flags[@]}"
|
||||
# Post-condition: no PVs
|
||||
kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_persistent_volume_claims_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
create_and_use_new_namespace
|
||||
kube::log::status "Testing persistent volumes claims"
|
||||
|
||||
### Create and delete persistent volume claim examples
|
||||
# Pre-condition: no persistent volume claims currently exist
|
||||
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
|
||||
kubectl delete pvc myclaim-1 "${kube_flags[@]}"
|
||||
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}"
|
||||
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:'
|
||||
kubectl delete pvc myclaim-2 "${kube_flags[@]}"
|
||||
|
||||
kubectl create -f test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}"
|
||||
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:'
|
||||
kubectl delete pvc myclaim-3 "${kube_flags[@]}"
|
||||
# Post-condition: no PVCs
|
||||
kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
||||
|
||||
run_storage_class_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing storage class"
|
||||
|
||||
### Create and delete storage class
|
||||
# Pre-condition: no storage classes currently exist
|
||||
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
kubectl create -f - "${kube_flags[@]}" << __EOF__
|
||||
{
|
||||
"kind": "StorageClass",
|
||||
"apiVersion": "storage.k8s.io/v1",
|
||||
"metadata": {
|
||||
"name": "storage-class-name"
|
||||
},
|
||||
"provisioner": "kubernetes.io/fake-provisioner-type",
|
||||
"parameters": {
|
||||
"zone":"us-east-1b",
|
||||
"type":"ssd"
|
||||
}
|
||||
}
|
||||
__EOF__
|
||||
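# Illustrative note (not part of the upstream test): the JSON heredoc above is
# equivalent to this YAML, and `sc` (asserted below) is the short name for
# storageclass:
#   apiVersion: storage.k8s.io/v1
#   kind: StorageClass
#   metadata:
#     name: storage-class-name
#   provisioner: kubernetes.io/fake-provisioner-type
#   parameters:
#     zone: us-east-1b
#     type: ssd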
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
|
||||
kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
|
||||
kubectl delete storageclass storage-class-name "${kube_flags[@]}"
|
||||
# Post-condition: no storage classes
|
||||
kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
|
||||
}
|
250
vendor/k8s.io/kubernetes/test/cmd/template-output.sh
generated
vendored
Executable file
250
vendor/k8s.io/kubernetes/test/cmd/template-output.sh
generated
vendored
Executable file
@ -0,0 +1,250 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
run_template_output_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing --template support on commands"
|
||||
### Test --template output support on create and mutation commands
|
||||
# Pre-condition: no POD exists
|
||||
create_and_use_new_namespace
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
|
||||
# Command
|
||||
# check that create supports --template output
|
||||
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
|
||||
# Post-condition: valid-pod POD is created
|
||||
kubectl get "${kube_flags[@]}" pods -o json
|
||||
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
|
||||
|
||||
# check that patch command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" patch --dry-run pods/valid-pod -p '{"patched":"value3"}' --type=merge --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
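# Illustrative note (not part of the upstream test): --template takes a Go
# template, so "{{ .metadata.name }}:" here is the go-template counterpart of a
# jsonpath query; the same value could also be read with e.g.:
#   kubectl get pod valid-pod -o jsonpath='{.metadata.name}'
#   kubectl get pod valid-pod -o go-template='{{.metadata.name}}'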
# check that label command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" label --dry-run pods/valid-pod label=value --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that annotate command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" annotate --dry-run pods/valid-pod annotation=value --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that apply command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" apply --dry-run -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that create command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that autoscale command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" autoscale --max=2 -f hack/testdata/scale-deploy-1.yaml --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'scale-1:'
|
||||
|
||||
# check that expose command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" expose -f hack/testdata/redis-slave-replicaset.yaml --save-config --port=80 --target-port=8000 --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'redis-slave:'
|
||||
|
||||
# check that convert command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" convert -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'nginx:'
|
||||
|
||||
# check that run command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" run --dry-run --template="{{ .metadata.name }}:" pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)')
|
||||
kube::test::if_has_string "${output_message}" 'pi:'
|
||||
|
||||
# check that taint command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" taint node 127.0.0.1 dedicated=foo:PreferNoSchedule --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" '127.0.0.1:'
|
||||
# untaint node
|
||||
kubectl taint node 127.0.0.1 dedicated-
|
||||
|
||||
# check that "apply set-last-applied" command supports --template output
|
||||
kubectl "${kube_flags[@]}" create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml
|
||||
output_message=$(kubectl "${kube_flags[@]}" apply set-last-applied -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml --dry-run --create-annotation --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'cassandra:'
|
||||
|
||||
# check that "auth reconcile" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" auth reconcile --dry-run -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'testing-CR:testing-CRB:testing-RB:testing-R:'
|
||||
|
||||
# check that "create clusterrole" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create clusterrole --template="{{ .metadata.name }}:" --verb get myclusterrole --non-resource-url /logs/ --resource pods)
|
||||
kube::test::if_has_string "${output_message}" 'myclusterrole:'
|
||||
|
||||
# check that "create clusterrolebinding" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create clusterrolebinding foo --clusterrole=myclusterrole --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create configmap" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create configmap cm --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'cm:'
|
||||
|
||||
# check that "create deployment" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create deployment deploy --image=nginx --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'deploy:'
|
||||
|
||||
# check that "create job" command supports --template output
|
||||
kubectl create "${kube_flags[@]}" -f - <<EOF
|
||||
apiVersion: batch/v1beta1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: pi
|
||||
spec:
|
||||
schedule: "*/10 * * * *"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
parent: "pi"
|
||||
spec:
|
||||
containers:
|
||||
- name: pi
|
||||
image: perl
|
||||
command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
|
||||
restartPolicy: OnFailure
|
||||
EOF
|
||||
output_message=$(kubectl "${kube_flags[@]}" create job foo --from=cronjob/pi --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create namespace" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create ns bar --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'bar:'
|
||||
|
||||
# check that "create namespace" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create rolebinding foo --clusterrole=myclusterrole --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create role" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create role --dry-run --template="{{ .metadata.name }}:" --verb get myrole --resource pods)
|
||||
kube::test::if_has_string "${output_message}" 'myrole:'
|
||||
|
||||
# check that "create quota" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create quota foo --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create priorityclass" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create priorityclass foo --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create poddisruptionbudget" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create poddisruptionbudget foo --dry-run --selector=foo --min-available=1 --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create serviceaccount" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create serviceaccount foo --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "set env" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" set env pod/valid-pod --dry-run A=B --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that "set image" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" set image pod/valid-pod --dry-run kubernetes-serve-hostname=nginx --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that "set resources" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" set resources pod/valid-pod --limits=memory=256Mi --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that "set selector" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" set selector -f hack/testdata/kubernetes-service.yaml A=B --local --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'kubernetes:'
|
||||
|
||||
# check that "set serviceaccount" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" set serviceaccount pod/valid-pod deployer --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'valid-pod:'
|
||||
|
||||
# check that "set subject" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" set subject clusterrolebinding/foo --user=foo --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create secret docker-registry" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create secret docker-registry foo --docker-username user --docker-password pass --docker-email foo@bar.baz --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create secret generic" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create secret generic foo --from-literal=key1=value1 --dry-run --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create secret tls" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create secret tls --dry-run foo --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create service clusterip" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create service clusterip foo --dry-run --tcp=8080 --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create service externalname" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create service externalname foo --dry-run --external-name=bar --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create service loadbalancer" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create service loadbalancer foo --dry-run --tcp=8080 --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "create service nodeport" command supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" create service nodeport foo --dry-run --tcp=8080 --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'foo:'
|
||||
|
||||
# check that "config view" ouputs "yaml" as its default output format
|
||||
output_message=$(kubectl "${kube_flags[@]}" config view)
|
||||
kube::test::if_has_string "${output_message}" 'kind: Config'
|
||||
|
||||
# check that "rollout pause" supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" rollout pause deploy/deploy --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'deploy:'
|
||||
|
||||
# check that "rollout history" supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" rollout history deploy/deploy --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'deploy:'
|
||||
|
||||
# check that "rollout resume" supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" rollout resume deploy/deploy --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'deploy:'
|
||||
|
||||
# check that "rollout undo" supports --template output
|
||||
output_message=$(kubectl "${kube_flags[@]}" rollout undo deploy/deploy --to-revision=1 --template="{{ .metadata.name }}:")
|
||||
kube::test::if_has_string "${output_message}" 'deploy:'
|
||||
|
||||
# check that "config view" command supports --template output
|
||||
# and that commands that set a default output (yaml in this case),
|
||||
# default to "go-template" as their output format when a --template
|
||||
# value is provided, but no explicit --output format is given.
|
||||
output_message=$(kubectl "${kube_flags[@]}" config view --template="{{ .kind }}:")
|
||||
kube::test::if_has_string "${output_message}" 'Config'
|
||||
|
||||
# check that running a command with both a --template flag and a
|
||||
# non-template --output prefers the non-template output value
|
||||
output_message=$(kubectl "${kube_flags[@]}" create configmap cm --dry-run --template="{{ .metadata.name }}:" --output yaml)
|
||||
kube::test::if_has_string "${output_message}" 'kind: ConfigMap'
|
||||
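# Illustrative note (not part of the upstream test): the two checks above pin
# down the precedence rules: a bare --template implies -o go-template, while an
# explicit --output wins over --template; spelling the implied form out would be:
#   kubectl create configmap cm --dry-run -o go-template='{{ .metadata.name }}:'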
|
||||
# cleanup
|
||||
kubectl delete cronjob pi "${kube_flags[@]}"
|
||||
kubectl delete pods --all "${kube_flags[@]}"
|
||||
kubectl delete rc cassandra "${kube_flags[@]}"
|
||||
kubectl delete clusterrole myclusterrole "${kube_flags[@]}"
|
||||
kubectl delete clusterrolebinding foo "${kube_flags[@]}"
|
||||
kubectl delete deployment deploy "${kube_flags[@]}"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
69
vendor/k8s.io/kubernetes/test/cmd/version.sh
generated
vendored
Executable file
69
vendor/k8s.io/kubernetes/test/cmd/version.sh
generated
vendored
Executable file
@ -0,0 +1,69 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
########################################################
|
||||
# Kubectl version (--short, --client, --output) #
|
||||
########################################################
|
||||
run_kubectl_version_tests() {
|
||||
set -o nounset
|
||||
set -o errexit
|
||||
|
||||
kube::log::status "Testing kubectl version"
|
||||
TEMP="${KUBE_TEMP}"
|
||||
|
||||
kubectl get "${kube_flags[@]}" --raw /version
|
||||
|
||||
# create version files, one for the client, one for the server.
|
||||
# these are the files we will use to ensure that the rest of the output is correct
|
||||
kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
|
||||
kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"
|
||||
|
||||
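# Illustrative note (not part of the upstream test): these helpers diff the
# clientVersion/serverVersion blocks that `kubectl version` emits; the JSON
# shape being compared is roughly:
#   kubectl version -o json
#   # { "clientVersion": { "gitVersion": ... }, "serverVersion": { "gitVersion": ... } }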
kube::log::status "Testing kubectl version: check client only output matches expected output"
|
||||
kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
|
||||
kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
|
||||
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
|
||||
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"
|
||||
|
||||
kube::log::status "Testing kubectl version: verify json output"
|
||||
kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
|
||||
kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
|
||||
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
|
||||
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"
|
||||
|
||||
kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
|
||||
kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
|
||||
kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
|
||||
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
|
||||
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"
|
||||
|
||||
kube::log::status "Testing kubectl version: compare json output using additional --short flag"
|
||||
kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
|
||||
kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
|
||||
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
|
||||
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"
|
||||
|
||||
kube::log::status "Testing kubectl version: compare json output with yaml output"
|
||||
kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
|
||||
kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
|
||||
kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"
|
||||
|
||||
set +o nounset
|
||||
set +o errexit
|
||||
}
|
2
vendor/k8s.io/kubernetes/test/conformance/BUILD
generated
vendored
2
vendor/k8s.io/kubernetes/test/conformance/BUILD
generated
vendored
@ -34,7 +34,7 @@ genrule(
|
||||
"//test/e2e_node:all-srcs",
|
||||
],
|
||||
outs = ["conformance.txt"],
|
||||
cmd = "./$(location :conformance) $(locations //test/e2e:all-srcs) $(locations //test/e2e_node:all-srcs) > $@",
|
||||
cmd = "./$(location :conformance) $(locations //test/e2e:all-srcs) > $@",
|
||||
message = "Listing all conformance tests.",
|
||||
tools = [":conformance"],
|
||||
)
|
||||
|
4
vendor/k8s.io/kubernetes/test/conformance/OWNERS
generated
vendored
4
vendor/k8s.io/kubernetes/test/conformance/OWNERS
generated
vendored
@ -2,6 +2,10 @@
|
||||
reviewers:
|
||||
- mml
|
||||
- cheftako
|
||||
- spiffxp
|
||||
approvers:
|
||||
- mml
|
||||
- cheftako
|
||||
labels:
|
||||
- area/conformance
|
||||
- sig/architecture
|
||||
|
2
vendor/k8s.io/kubernetes/test/conformance/cf_header.md
generated
vendored
2
vendor/k8s.io/kubernetes/test/conformance/cf_header.md
generated
vendored
@ -2,7 +2,7 @@
|
||||
|
||||
## **Summary**
|
||||
This document provides a summary of the tests included in the Kubernetes conformance test suite.
|
||||
Each test lists a set of formal requirements that a platform that meets conformance requirements must adhere to.
|
||||
Each test lists a set of formal requirements that a platform that meets conformance requirements must adhere to.
|
||||
|
||||
The tests are a subset of the "e2e" tests that make up the Kubernetes testing infrastructure.
|
||||
Each test is identified by the presence of the `[Conformance]` keyword in the ginkgo descriptive function calls.
|
||||
|
3
vendor/k8s.io/kubernetes/test/conformance/testdata/OWNERS
generated
vendored
3
vendor/k8s.io/kubernetes/test/conformance/testdata/OWNERS
generated
vendored
@ -7,3 +7,6 @@ reviewers:
|
||||
approvers:
|
||||
- bgrant0607
|
||||
- smarterclayton
|
||||
labels:
|
||||
- area/conformance
|
||||
- sig/architecture
|
||||
|
102
vendor/k8s.io/kubernetes/test/conformance/testdata/conformance.txt
generated
vendored
102
vendor/k8s.io/kubernetes/test/conformance/testdata/conformance.txt
generated
vendored
@ -6,6 +6,8 @@ test/e2e/apimachinery/garbage_collector.go: "should orphan RS created by deploym
|
||||
test/e2e/apimachinery/garbage_collector.go: "should keep the rc around until all its pods are deleted if the deleteOptions says so"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted"
|
||||
test/e2e/apimachinery/garbage_collector.go: "should not be blocked by dependency circle"
|
||||
test/e2e/apimachinery/namespace.go: "should ensure that all pods are removed when a namespace is deleted"
|
||||
test/e2e/apimachinery/namespace.go: "should ensure that all services are removed when a namespace is deleted"
|
||||
test/e2e/apimachinery/watch.go: "should observe add, update, and delete watch notifications on configmaps"
|
||||
test/e2e/apimachinery/watch.go: "should be able to start watching from a specific resource version"
|
||||
test/e2e/apimachinery/watch.go: "should be able to restart watching from the last resource version observed by the previous watch"
|
||||
@ -15,8 +17,16 @@ test/e2e/apps/daemon_set.go: "should run and stop complex daemon"
|
||||
test/e2e/apps/daemon_set.go: "should retry creating failed daemon pods"
|
||||
test/e2e/apps/daemon_set.go: "should update pod when spec was updated and update strategy is RollingUpdate"
|
||||
test/e2e/apps/daemon_set.go: "should rollback without unnecessary restarts"
|
||||
test/e2e/apps/deployment.go: "RollingUpdateDeployment should delete old pods and create new ones"
|
||||
test/e2e/apps/deployment.go: "RecreateDeployment should delete old pods and create new ones"
|
||||
test/e2e/apps/deployment.go: "deployment should delete old replica sets"
test/e2e/apps/deployment.go: "deployment should support rollover"
test/e2e/apps/deployment.go: "deployment should support proportional scaling"
test/e2e/apps/rc.go: "should serve a basic image on each replica with a public image"
test/e2e/apps/rc.go: "should adopt matching pods on creation"
test/e2e/apps/rc.go: "should release no longer matching pods"
test/e2e/apps/replica_set.go: "should serve a basic image on each replica with a public image"
test/e2e/apps/replica_set.go: "should adopt matching pods on creation and release no longer matching pods"
test/e2e/apps/statefulset.go: "should perform rolling updates and roll backs of template modifications"
test/e2e/apps/statefulset.go: "should perform canary updates and phased rolling updates of template modifications"
test/e2e/apps/statefulset.go: "Scaling should happen in predictable order and halt if any stateful pod is unhealthy"
@ -33,6 +43,7 @@ test/e2e/common/configmap_volume.go: "should be consumable from pods in volume w
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings and Item mode set"
test/e2e/common/configmap_volume.go: "should be consumable from pods in volume with mappings as non-root"
test/e2e/common/configmap_volume.go: "updates should be reflected in volume"
test/e2e/common/configmap_volume.go: "binary data should be reflected in volume"
test/e2e/common/configmap_volume.go: "optional updates should be reflected in volume"
test/e2e/common/configmap_volume.go: "should be consumable in multiple volumes in the same pod"
test/e2e/common/container_probe.go: "with readiness probe should not be ready before initial delay and never restart"
@ -80,7 +91,20 @@ test/e2e/common/expansion.go: "should allow composing env vars into new env vars
test/e2e/common/expansion.go: "should allow substituting values in a container's command"
test/e2e/common/expansion.go: "should allow substituting values in a container's args"
test/e2e/common/host_path.go: "should give a volume the correct mode"
test/e2e/common/init_container.go: "should invoke init containers on a RestartNever pod"
test/e2e/common/init_container.go: "should invoke init containers on a RestartAlways pod"
test/e2e/common/init_container.go: "should not start app containers if init containers fail on a RestartAlways pod"
test/e2e/common/init_container.go: "should not start app containers and fail the pod if init containers fail on a RestartNever pod"
test/e2e/common/kubelet.go: "should print the output to logs"
test/e2e/common/kubelet.go: "should have an terminated reason"
test/e2e/common/kubelet.go: "should be possible to delete"
test/e2e/common/kubelet.go: "should write entries to /etc/hosts"
test/e2e/common/kubelet.go: "should not write to root filesystem"
test/e2e/common/kubelet_etc_hosts.go: "should test kubelet managed /etc/hosts file"
test/e2e/common/lifecycle_hook.go: "should execute poststart exec hook properly"
test/e2e/common/lifecycle_hook.go: "should execute prestop exec hook properly"
test/e2e/common/lifecycle_hook.go: "should execute poststart http hook properly"
test/e2e/common/lifecycle_hook.go: "should execute prestop http hook properly"
test/e2e/common/networking.go: "should function for intra-pod communication: http"
test/e2e/common/networking.go: "should function for intra-pod communication: udp"
test/e2e/common/networking.go: "should function for node-pod communication: http"
@ -90,34 +114,37 @@ test/e2e/common/pods.go: "should be submitted and removed"
test/e2e/common/pods.go: "should be updated"
test/e2e/common/pods.go: "should allow activeDeadlineSeconds to be updated"
test/e2e/common/pods.go: "should contain environment variables for services"
test/e2e/common/projected.go: "should be consumable from pods in volume"
test/e2e/common/projected.go: "should be consumable from pods in volume with defaultMode set"
test/e2e/common/projected.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set"
test/e2e/common/projected.go: "should be consumable from pods in volume with mappings"
test/e2e/common/projected.go: "should be consumable from pods in volume with mappings and Item Mode set"
test/e2e/common/projected.go: "should be consumable in multiple volumes in a pod"
test/e2e/common/projected.go: "optional updates should be reflected in volume"
test/e2e/common/projected.go: "should be consumable from pods in volume"
test/e2e/common/projected.go: "should be consumable from pods in volume with defaultMode set"
test/e2e/common/projected.go: "should be consumable from pods in volume as non-root"
test/e2e/common/projected.go: "should be consumable from pods in volume with mappings"
test/e2e/common/projected.go: "should be consumable from pods in volume with mappings and Item mode set"
test/e2e/common/projected.go: "should be consumable from pods in volume with mappings as non-root"
test/e2e/common/projected.go: "updates should be reflected in volume"
test/e2e/common/projected.go: "optional updates should be reflected in volume"
test/e2e/common/projected.go: "should be consumable in multiple volumes in the same pod"
test/e2e/common/projected.go: "should provide podname only"
test/e2e/common/projected.go: "should set DefaultMode on files"
test/e2e/common/projected.go: "should set mode on item file"
test/e2e/common/projected.go: "should update labels on modification"
test/e2e/common/projected.go: "should update annotations on modification"
test/e2e/common/projected.go: "should provide container's cpu limit"
test/e2e/common/projected.go: "should provide container's memory limit"
test/e2e/common/projected.go: "should provide container's cpu request"
test/e2e/common/projected.go: "should provide container's memory request"
test/e2e/common/projected.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set"
test/e2e/common/projected.go: "should provide node allocatable (memory) as default memory limit if the limit is not set"
test/e2e/common/projected.go: "should project all components that make up the projection API"
test/e2e/common/pods.go: "should support remote command execution over websockets"
test/e2e/common/pods.go: "should support retrieving logs from the container over websockets"
test/e2e/common/projected_combined.go: "should project all components that make up the projection API"
test/e2e/common/projected_configmap.go: "should be consumable from pods in volume"
test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with defaultMode set"
test/e2e/common/projected_configmap.go: "should be consumable from pods in volume as non-root"
test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with mappings"
test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with mappings and Item mode set"
test/e2e/common/projected_configmap.go: "should be consumable from pods in volume with mappings as non-root"
test/e2e/common/projected_configmap.go: "updates should be reflected in volume"
test/e2e/common/projected_configmap.go: "optional updates should be reflected in volume"
test/e2e/common/projected_configmap.go: "should be consumable in multiple volumes in the same pod"
test/e2e/common/projected_downwardapi.go: "should provide podname only"
test/e2e/common/projected_downwardapi.go: "should set DefaultMode on files"
test/e2e/common/projected_downwardapi.go: "should set mode on item file"
test/e2e/common/projected_downwardapi.go: "should update labels on modification"
test/e2e/common/projected_downwardapi.go: "should update annotations on modification"
test/e2e/common/projected_downwardapi.go: "should provide container's cpu limit"
test/e2e/common/projected_downwardapi.go: "should provide container's memory limit"
test/e2e/common/projected_downwardapi.go: "should provide container's cpu request"
test/e2e/common/projected_downwardapi.go: "should provide container's memory request"
test/e2e/common/projected_downwardapi.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set"
test/e2e/common/projected_downwardapi.go: "should provide node allocatable (memory) as default memory limit if the limit is not set"
test/e2e/common/projected_secret.go: "should be consumable from pods in volume"
test/e2e/common/projected_secret.go: "should be consumable from pods in volume with defaultMode set"
test/e2e/common/projected_secret.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set"
test/e2e/common/projected_secret.go: "should be consumable from pods in volume with mappings"
test/e2e/common/projected_secret.go: "should be consumable from pods in volume with mappings and Item Mode set"
test/e2e/common/projected_secret.go: "should be consumable in multiple volumes in a pod"
test/e2e/common/projected_secret.go: "optional updates should be reflected in volume"
test/e2e/common/runtime.go: "should run with the expected status"
test/e2e/common/secrets.go: "should be consumable from pods in env vars"
test/e2e/common/secrets.go: "should be consumable via the environment"
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume"
@ -125,6 +152,7 @@ test/e2e/common/secrets_volume.go: "should be consumable from pods in volume wit
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume as non-root with defaultMode and fsGroup set"
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings"
test/e2e/common/secrets_volume.go: "should be consumable from pods in volume with mappings and Item Mode set"
test/e2e/common/secrets_volume.go: "should be able to mount in a volume regardless of a different secret existing with same name in different namespace"
test/e2e/common/secrets_volume.go: "should be consumable in multiple volumes in a pod"
test/e2e/common/secrets_volume.go: "optional updates should be reflected in volume"
test/e2e/kubectl/kubectl.go: "should create and stop a replication controller"
@ -160,18 +188,14 @@ test/e2e/network/service.go: "should serve multiport endpoints from pods"
test/e2e/network/service_latency.go: "should not be very high"
test/e2e/node/events.go: "should be sent by kubelets and the scheduler about pods scheduling and running"
test/e2e/node/pods.go: "should be submitted and removed"
test/e2e/node/pods.go: "should be submitted and removed"
test/e2e/node/pre_stop.go: "should call prestop when killing a pod"
test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run"
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching"
test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching"
test/e2e_node/kubelet_test.go: "it should print the output to logs"
test/e2e_node/kubelet_test.go: "it should not write to root filesystem"
test/e2e_node/lifecycle_hook_test.go: "should execute poststart exec hook properly"
test/e2e_node/lifecycle_hook_test.go: "should execute prestop exec hook properly"
test/e2e_node/lifecycle_hook_test.go: "should execute poststart http hook properly"
test/e2e_node/lifecycle_hook_test.go: "should execute prestop http hook properly"
test/e2e_node/mirror_pod_test.go: "should be updated when static pod updated"
test/e2e_node/mirror_pod_test.go: "should be recreated when mirror pod gracefully deleted"
test/e2e_node/mirror_pod_test.go: "should be recreated when mirror pod forcibly deleted"
test/e2e_node/runtime_conformance_test.go: "it should run with the expected status"
test/e2e/storage/empty_dir_wrapper.go: "should not conflict"
test/e2e/storage/empty_dir_wrapper.go: "should not cause race condition when used for configmaps"
test/e2e/storage/subpath.go: "should support subpaths with secret pod"
test/e2e/storage/subpath.go: "should support subpaths with configmap pod"
test/e2e/storage/subpath.go: "should support subpaths with configmap pod with mountPath of existing file"
test/e2e/storage/subpath.go: "should support subpaths with downward pod"
test/e2e/storage/subpath.go: "should support subpaths with projected pod"
35
vendor/k8s.io/kubernetes/test/e2e/BUILD
generated
vendored
@ -18,6 +18,9 @@ go_test(
|
||||
"//test/e2e/autoscaling:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/testfiles:go_default_library",
|
||||
"//test/e2e/framework/viperconfig:go_default_library",
|
||||
"//test/e2e/generated:go_default_library",
|
||||
"//test/e2e/instrumentation:go_default_library",
|
||||
"//test/e2e/kubectl:go_default_library",
|
||||
"//test/e2e/lifecycle:go_default_library",
|
||||
@ -43,33 +46,33 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/test/e2e",
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e/generated:go_default_library",
|
||||
"//test/e2e/framework/providers/aws:go_default_library",
|
||||
"//test/e2e/framework/providers/azure:go_default_library",
|
||||
"//test/e2e/framework/providers/gce:go_default_library",
|
||||
"//test/e2e/framework/providers/kubemark:go_default_library",
|
||||
"//test/e2e/framework/testfiles:go_default_library",
|
||||
"//test/e2e/manifest:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
83
vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD
generated
vendored
@ -11,6 +11,7 @@ go_library(
|
||||
"aggregator.go",
|
||||
"certs.go",
|
||||
"chunking.go",
|
||||
"crd_conversion_webhook.go",
|
||||
"crd_watch.go",
|
||||
"custom_resource_definition.go",
|
||||
"etcd_failure.go",
|
||||
@ -26,9 +27,49 @@ go_library(
|
||||
importpath = "k8s.io/kubernetes/test/e2e/apimachinery",
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/rbac:go_default_library",
|
||||
"//pkg/apis/rbac/v1beta1:go_default_library",
|
||||
"//pkg/printers:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
|
||||
"//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/batch/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/test/integration:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/discovery:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
|
||||
"//test/e2e/apps:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
@ -37,44 +78,6 @@ go_library(
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
|
||||
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/authorization/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
|
||||
"//vendor/k8s.io/client-go/discovery:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/cert:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/retry:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
|
||||
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
128
vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go
generated
vendored
@ -32,14 +32,13 @@ import (
|
||||
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/discovery"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
|
||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
rbacv1beta1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
|
||||
@ -47,7 +46,7 @@ import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.7.0")
|
||||
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.10.0")
|
||||
|
||||
var _ = SIGDescribe("Aggregator", func() {
|
||||
var ns string
|
||||
@ -73,12 +72,12 @@ var _ = SIGDescribe("Aggregator", func() {
|
||||
aggrclient = f.AggregatorClient
|
||||
})
|
||||
|
||||
It("Should be able to support the 1.7 Sample API Server using the current Aggregator", func() {
|
||||
It("Should be able to support the 1.10 Sample API Server using the current Aggregator", func() {
|
||||
// Make sure the relevant provider supports Agggregator
|
||||
framework.SkipUnlessServerVersionGTE(serverAggregatorVersion, f.ClientSet.Discovery())
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
|
||||
// Testing a 1.7 version of the sample-apiserver
|
||||
// Testing a 1.10 version of the sample-apiserver
|
||||
TestSampleAPIServer(f, imageutils.GetE2EImage(imageutils.APIServer))
|
||||
})
|
||||
})
|
||||
@ -87,34 +86,29 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse
|
||||
// delete the APIService first to avoid causing discovery errors
|
||||
_ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil)
|
||||
|
||||
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil)
|
||||
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver-deployment", nil)
|
||||
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
|
||||
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
|
||||
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
|
||||
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete("wardler-auth-reader", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoles().Delete("wardler", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":anonymous", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":auth-delegator", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoles().Delete("sample-apiserver-reader", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":sample-apiserver-reader", nil)
|
||||
}
|
||||
|
||||
// A basic test if the sample-apiserver code from 1.7 and compiled against 1.7
|
||||
// A basic test if the sample-apiserver code from 1.10 and compiled against 1.10
|
||||
// will work on the current Aggregator/API-Server.
|
||||
func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
By("Registering the sample API server.")
|
||||
client := f.ClientSet
|
||||
restClient := client.Discovery().RESTClient()
|
||||
iclient := f.InternalClientset
|
||||
aggrclient := f.AggregatorClient
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
context := setupServerCert(namespace, "sample-api")
|
||||
if framework.ProviderIs("gke") {
|
||||
// kubectl create clusterrolebinding user-cluster-admin-binding --clusterrole=cluster-admin --user=user@domain.com
|
||||
authenticated := rbacv1beta1.Subject{Kind: rbacv1beta1.GroupKind, Name: user.AllAuthenticated}
|
||||
framework.BindClusterRole(client.RbacV1beta1(), "cluster-admin", namespace, authenticated)
|
||||
}
|
||||
|
||||
// kubectl create -f namespace.yaml
|
||||
// NOTE: aggregated apis should generally be set up in there own namespace. As the test framework is setting up a new namespace, we are just using that.
|
||||
// NOTE: aggregated apis should generally be set up in their own namespace. As the test framework is setting up a new namespace, we are just using that.
|
||||
|
||||
// kubectl create -f secret.yaml
|
||||
secretName := "sample-apiserver-secret"
|
||||
@ -131,9 +125,61 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
_, err := client.CoreV1().Secrets(namespace).Create(secret)
|
||||
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
|
||||
|
||||
// kubectl create -f clusterrole.yaml
|
||||
_, err = client.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
|
||||
// role for listing ValidatingWebhookConfiguration/MutatingWebhookConfiguration/Namespaces
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"},
|
||||
Rules: []rbacv1beta1.PolicyRule{
|
||||
rbacv1beta1helpers.NewRule("list").Groups("").Resources("namespaces").RuleOrDie(),
|
||||
rbacv1beta1helpers.NewRule("list").Groups("admissionregistration.k8s.io").Resources("*").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader")
|
||||
|
||||
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler:" + namespace + ":sample-apiserver-reader",
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "sample-apiserver-reader",
|
||||
},
|
||||
Subjects: []rbacv1beta1.Subject{
|
||||
{
|
||||
APIGroup: "",
|
||||
Kind: "ServiceAccount",
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader")
|
||||
|
||||
// kubectl create -f authDelegator.yaml
|
||||
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler:" + namespace + ":auth-delegator",
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "system:auth-delegator",
|
||||
},
|
||||
Subjects: []rbacv1beta1.Subject{
|
||||
{
|
||||
APIGroup: "",
|
||||
Kind: "ServiceAccount",
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":auth-delegator")
|
||||
|
||||
// kubectl create -f deploy.yaml
|
||||
deploymentName := "sample-apiserver-deployment"
|
||||
etcdImage := "quay.io/coreos/etcd:v3.2.18"
|
||||
etcdImage := "quay.io/coreos/etcd:v3.2.24"
|
||||
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
@ -230,48 +276,6 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
_, err = client.CoreV1().ServiceAccounts(namespace).Create(sa)
|
||||
framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
|
||||
|
||||
// kubectl create -f authDelegator.yaml
|
||||
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler:" + namespace + ":anonymous",
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "wardler",
|
||||
},
|
||||
Subjects: []rbacv1beta1.Subject{
|
||||
{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "User",
|
||||
Name: namespace + ":anonymous",
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":anonymous")
|
||||
|
||||
// kubectl create -f role.yaml
|
||||
resourceRule, err := rbacapi.NewRule("create", "delete", "deletecollection", "get", "list", "patch", "update", "watch").Groups("wardle.k8s.io").Resources("flunders").Rule()
|
||||
framework.ExpectNoError(err, "creating cluster resource rule")
|
||||
urlRule, err := rbacapi.NewRule("get").URLs("*").Rule()
|
||||
framework.ExpectNoError(err, "creating cluster url rule")
|
||||
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
roleLabels := map[string]string{"kubernetes.io/bootstrapping": "wardle-default"}
|
||||
role := rbacapi.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler",
|
||||
Labels: roleLabels,
|
||||
},
|
||||
Rules: []rbacapi.PolicyRule{resourceRule, urlRule},
|
||||
}
|
||||
_, err = iclient.Rbac().ClusterRoles().Create(&role)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role wardler - may not have permissions")
|
||||
|
||||
// kubectl create -f auth-reader.yaml
|
||||
_, err = client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -297,7 +301,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
|
||||
// Wait for the extension apiserver to be up and healthy
|
||||
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
|
||||
// NOTE: aggregated apis should generally be set up in there own namespace (<aggregated-api-namespace>). As the test framework
|
||||
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
|
||||
// is setting up a new namespace, we are just using that.
|
||||
err = framework.WaitForDeploymentComplete(client, deployment)
|
||||
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
|
||||
@ -436,7 +440,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
unstruct := &unstructuredv1.Unstructured{}
|
||||
err = unstruct.UnmarshalJSON(jsonFlunder)
|
||||
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
|
||||
unstruct, err = dynamicClient.Create(unstruct)
|
||||
unstruct, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
|
||||
// kubectl get flunders
|
||||
|
104
vendor/k8s.io/kubernetes/test/e2e/apimachinery/chunking.go
generated
vendored
104
vendor/k8s.io/kubernetes/test/e2e/apimachinery/chunking.go
generated
vendored
@ -17,14 +17,20 @@ limitations under the License.
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/storage/storagebackend"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
@ -34,13 +40,12 @@ const numberOfTotalResources = 400
|
||||
var _ = SIGDescribe("Servers with support for API chunking", func() {
|
||||
f := framework.NewDefaultFramework("chunking")
|
||||
|
||||
It("should return chunks of results for list calls", func() {
|
||||
BeforeEach(func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
client := c.CoreV1().PodTemplates(ns)
|
||||
|
||||
By("creating a large number of resources")
|
||||
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
|
||||
workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) {
|
||||
for tries := 3; tries >= 0; tries-- {
|
||||
_, err := client.Create(&v1.PodTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -61,7 +66,12 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
|
||||
}
|
||||
Fail("Unable to create template %d, exiting", i)
|
||||
})
|
||||
})
|
||||
|
||||
It("should return chunks of results for list calls", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
client := c.CoreV1().PodTemplates(ns)
|
||||
By("retrieving those results in paged fashion several times")
|
||||
for i := 0; i < 3; i++ {
|
||||
opts := metav1.ListOptions{}
|
||||
@ -70,20 +80,14 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
|
||||
for {
|
||||
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
|
||||
list, err := client.List(opts)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
|
||||
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
|
||||
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
|
||||
if len(list.Items) > int(opts.Limit) {
|
||||
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
|
||||
}
|
||||
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
|
||||
|
||||
if len(lastRV) == 0 {
|
||||
lastRV = list.ResourceVersion
|
||||
}
|
||||
if lastRV != list.ResourceVersion {
|
||||
Expect(list.ResourceVersion).To(Equal(lastRV))
|
||||
}
|
||||
Expect(list.ResourceVersion).To(Equal(lastRV))
|
||||
for _, item := range list.Items {
|
||||
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
|
||||
found++
|
||||
@ -97,8 +101,82 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
|
||||
}
|
||||
|
||||
By("retrieving those results all at once")
|
||||
list, err := client.List(metav1.ListOptions{Limit: numberOfTotalResources + 1})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
|
||||
list, err := client.List(opts)
|
||||
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
|
||||
Expect(list.Items).To(HaveLen(numberOfTotalResources))
|
||||
})
|
||||
|
||||
It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
client := c.CoreV1().PodTemplates(ns)
|
||||
|
||||
By("retrieving the first page")
|
||||
oneTenth := int64(numberOfTotalResources / 10)
|
||||
opts := metav1.ListOptions{}
|
||||
opts.Limit = oneTenth
|
||||
list, err := client.List(opts)
|
||||
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
|
||||
firstToken := list.Continue
|
||||
firstRV := list.ResourceVersion
|
||||
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
|
||||
|
||||
By("retrieving the second page until the token expires")
|
||||
opts.Continue = firstToken
|
||||
var inconsistentToken string
|
||||
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
|
||||
_, err := client.List(opts)
|
||||
if err == nil {
|
||||
framework.Logf("Token %s has not expired yet", firstToken)
|
||||
return false, nil
|
||||
}
|
||||
if err != nil && !errors.IsResourceExpired(err) {
|
||||
return false, err
|
||||
}
|
||||
framework.Logf("got error %s", err)
|
||||
status, ok := err.(errors.APIStatus)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
|
||||
}
|
||||
inconsistentToken = status.Status().ListMeta.Continue
|
||||
if len(inconsistentToken) == 0 {
|
||||
return false, fmt.Errorf("expect non empty continue token")
|
||||
}
|
||||
framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
|
||||
return true, nil
|
||||
})
|
||||
|
||||
By("retrieving the second page again with the token received with the error message")
|
||||
opts.Continue = inconsistentToken
|
||||
list, err = client.List(opts)
|
||||
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
|
||||
Expect(list.ResourceVersion).ToNot(Equal(firstRV))
|
||||
Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
|
||||
found := oneTenth
|
||||
for _, item := range list.Items {
|
||||
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
|
||||
found++
|
||||
}
|
||||
|
||||
By("retrieving all remaining pages")
|
||||
opts.Continue = list.Continue
|
||||
lastRV := list.ResourceVersion
|
||||
for {
|
||||
list, err := client.List(opts)
|
||||
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
|
||||
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
|
||||
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
|
||||
Expect(list.ResourceVersion).To(Equal(lastRV))
|
||||
for _, item := range list.Items {
|
||||
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
|
||||
found++
|
||||
}
|
||||
if len(list.Continue) == 0 {
|
||||
break
|
||||
}
|
||||
opts.Continue = list.Continue
|
||||
}
|
||||
Expect(found).To(BeNumerically("==", numberOfTotalResources))
|
||||
})
|
||||
})
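Note (illustrative sketch, not part of the vendored diff): the chunking test above drives the Limit/Continue paging contract by hand. The helper below shows the same consumption pattern, assuming the List(metav1.ListOptions) signature used in this diff; listFn is a hypothetical stand-in for a call such as client.CoreV1().PodTemplates(ns).List.

package apimachinery

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// listAllPodTemplates pages through a chunked List: it keeps passing the
// Continue token returned by the server back into the next request until
// the token comes back empty, then returns the accumulated items.
func listAllPodTemplates(listFn func(metav1.ListOptions) (*v1.PodTemplateList, error), pageSize int64) ([]v1.PodTemplate, error) {
	var all []v1.PodTemplate
	opts := metav1.ListOptions{Limit: pageSize}
	for {
		page, err := listFn(opts)
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.Continue == "" {
			// Last chunk: the server has no more results to return.
			return all, nil
		}
		opts.Continue = page.Continue
	}
}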
|
||||
|
396
vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go
generated
vendored
Normal file
@ -0,0 +1,396 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/apiextensions-apiserver/test/integration"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
_ "github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
secretCRDName = "sample-custom-resource-conversion-webhook-secret"
|
||||
deploymentCRDName = "sample-crd-conversion-webhook-deployment"
|
||||
serviceCRDName = "e2e-test-crd-conversion-webhook"
|
||||
roleBindingCRDName = "crd-conversion-webhook-auth-reader"
|
||||
)
|
||||
|
||||
var serverCRDConversionWebhookVersion = utilversion.MustParseSemantic("v1.13.0-alpha")
|
||||
|
||||
var apiVersions = []v1beta1.CustomResourceDefinitionVersion{
|
||||
{
|
||||
Name: "v1",
|
||||
Served: true,
|
||||
Storage: true,
|
||||
},
|
||||
{
|
||||
Name: "v2",
|
||||
Served: true,
|
||||
Storage: false,
|
||||
},
|
||||
}
|
||||
|
||||
var alternativeApiVersions = []v1beta1.CustomResourceDefinitionVersion{
|
||||
{
|
||||
Name: "v1",
|
||||
Served: true,
|
||||
Storage: false,
|
||||
},
|
||||
{
|
||||
Name: "v2",
|
||||
Served: true,
|
||||
Storage: true,
|
||||
},
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("CustomResourceConversionWebhook [Feature:CustomResourceWebhookConversion]", func() {
|
||||
var context *certContext
|
||||
f := framework.NewDefaultFramework("crd-webhook")
|
||||
|
||||
var client clientset.Interface
|
||||
var namespaceName string
|
||||
|
||||
BeforeEach(func() {
|
||||
client = f.ClientSet
|
||||
namespaceName = f.Namespace.Name
|
||||
|
||||
// Make sure the relevant provider supports conversion webhook
|
||||
framework.SkipUnlessServerVersionGTE(serverCRDConversionWebhookVersion, f.ClientSet.Discovery())
|
||||
|
||||
By("Setting up server cert")
|
||||
context = setupServerCert(f.Namespace.Name, serviceCRDName)
|
||||
createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name)
|
||||
|
||||
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.CRDConversionWebhook), context)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
cleanCRDWebhookTest(client, namespaceName)
|
||||
})
|
||||
|
||||
It("Should be able to convert from CR v1 to CR v2", func() {
|
||||
testcrd, err := framework.CreateMultiVersionTestCRD(f, "stable.example.com", apiVersions,
|
||||
&v1beta1.WebhookClientConfig{
|
||||
CABundle: context.signingCert,
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: serviceCRDName,
|
||||
Path: strPtr("/crdconvert"),
|
||||
}})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer testcrd.CleanUp()
|
||||
testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients)
|
||||
})
|
||||
|
||||
It("Should be able to convert a non homogeneous list of CRs", func() {
|
||||
testcrd, err := framework.CreateMultiVersionTestCRD(f, "stable.example.com", apiVersions,
|
||||
&v1beta1.WebhookClientConfig{
|
||||
CABundle: context.signingCert,
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: serviceCRDName,
|
||||
Path: strPtr("/crdconvert"),
|
||||
}})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer testcrd.CleanUp()
|
||||
testCRListConversion(f, testcrd)
|
||||
})
|
||||
})
|
||||
|
||||
func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) {
|
||||
_ = client.CoreV1().Services(namespaceName).Delete(serviceCRDName, nil)
|
||||
_ = client.AppsV1().Deployments(namespaceName).Delete(deploymentCRDName, nil)
|
||||
_ = client.CoreV1().Secrets(namespaceName).Delete(secretCRDName, nil)
|
||||
_ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingCRDName, nil)
|
||||
}
|
||||
|
||||
func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) {
|
||||
By("Create role binding to let cr conversion webhook read extension-apiserver-authentication")
|
||||
client := f.ClientSet
|
||||
// Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
|
||||
_, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleBindingCRDName,
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "",
|
||||
Kind: "Role",
|
||||
Name: "extension-apiserver-authentication-reader",
|
||||
},
|
||||
// Webhook uses the default service account.
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil && errors.IsAlreadyExists(err) {
|
||||
framework.Logf("role binding %s already exists", roleBindingCRDName)
|
||||
} else {
|
||||
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
|
||||
}
|
||||
}
|
||||
|
||||
func deployCustomResourceWebhookAndService(f *framework.Framework, image string, context *certContext) {
|
||||
By("Deploying the custom resource conversion webhook pod")
|
||||
client := f.ClientSet
|
||||
|
||||
// Creating the secret that contains the webhook's cert.
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretCRDName,
|
||||
},
|
||||
Type: v1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": context.cert,
|
||||
"tls.key": context.key,
|
||||
},
|
||||
}
|
||||
namespace := f.Namespace.Name
|
||||
_, err := client.CoreV1().Secrets(namespace).Create(secret)
|
||||
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
|
||||
|
||||
// Create the deployment of the webhook
|
||||
podLabels := map[string]string{"app": "sample-crd-conversion-webhook", "crd-webhook": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
mounts := []v1.VolumeMount{
|
||||
{
|
||||
Name: "crd-conversion-webhook-certs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/webhook.local.config/certificates",
|
||||
},
|
||||
}
|
||||
volumes := []v1.Volume{
|
||||
{
|
||||
Name: "crd-conversion-webhook-certs",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{SecretName: secretCRDName},
|
||||
},
|
||||
},
|
||||
}
|
||||
containers := []v1.Container{
|
||||
{
|
||||
Name: "sample-crd-conversion-webhook",
|
||||
VolumeMounts: mounts,
|
||||
Args: []string{
|
||||
"--tls-cert-file=/webhook.local.config/certificates/tls.crt",
|
||||
"--tls-private-key-file=/webhook.local.config/certificates/tls.key",
|
||||
"--alsologtostderr",
|
||||
"-v=4",
|
||||
"2>&1",
|
||||
},
|
||||
Image: image,
|
||||
},
|
||||
}
|
||||
d := &apps.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentCRDName,
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: apps.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: podLabels,
|
||||
},
|
||||
Strategy: apps.DeploymentStrategy{
|
||||
Type: apps.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: containers,
|
||||
Volumes: volumes,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
|
||||
By("Wait for the deployment to be ready")
|
||||
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
|
||||
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
|
||||
err = framework.WaitForDeploymentComplete(client, deployment)
|
||||
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace)
|
||||
|
||||
By("Deploying the webhook service")
|
||||
|
||||
serviceLabels := map[string]string{"crd-webhook": "true"}
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: serviceCRDName,
|
||||
Labels: map[string]string{"test": "crd-webhook"},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: serviceLabels,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Protocol: "TCP",
|
||||
Port: 443,
|
||||
TargetPort: intstr.FromInt(443),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().Services(namespace).Create(service)
|
||||
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace)
|
||||
|
||||
By("Verifying the service has paired with the endpoint")
|
||||
err = framework.WaitForServiceEndpointsNum(client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second)
|
||||
framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1)
|
||||
}
|
||||
|
||||
func verifyV1Object(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, obj *unstructured.Unstructured) {
|
||||
Expect(obj.GetAPIVersion()).To(BeEquivalentTo(crd.Spec.Group + "/v1"))
|
||||
hostPort, exists := obj.Object["hostPort"]
|
||||
Expect(exists).To(BeTrue())
|
||||
Expect(hostPort).To(BeEquivalentTo("localhost:8080"))
|
||||
_, hostExists := obj.Object["host"]
|
||||
Expect(hostExists).To(BeFalse())
|
||||
_, portExists := obj.Object["port"]
|
||||
Expect(portExists).To(BeFalse())
|
||||
}
|
||||
|
||||
func verifyV2Object(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, obj *unstructured.Unstructured) {
|
||||
Expect(obj.GetAPIVersion()).To(BeEquivalentTo(crd.Spec.Group + "/v2"))
|
||||
_, hostPortExists := obj.Object["hostPort"]
|
||||
Expect(hostPortExists).To(BeFalse())
|
||||
host, hostExists := obj.Object["host"]
|
||||
Expect(hostExists).To(BeTrue())
|
||||
Expect(host).To(BeEquivalentTo("localhost"))
|
||||
port, portExists := obj.Object["port"]
|
||||
Expect(portExists).To(BeTrue())
|
||||
Expect(port).To(BeEquivalentTo("8080"))
|
||||
}
|
||||
|
||||
func testCustomResourceConversionWebhook(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) {
|
||||
name := "cr-instance-1"
|
||||
By("Creating a v1 custom resource")
|
||||
crInstance := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/v1",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": name,
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"hostPort": "localhost:8080",
|
||||
},
|
||||
}
|
||||
_, err := customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
|
||||
Expect(err).To(BeNil())
|
||||
By("v2 custom resource should be converted")
|
||||
v2crd, err := customResourceClients["v2"].Get(name, metav1.GetOptions{})
|
||||
verifyV2Object(f, crd, v2crd)
|
||||
}
|
||||
|
||||
func testCRListConversion(f *framework.Framework, testCrd *framework.TestCrd) {
|
||||
crd := testCrd.Crd
|
||||
customResourceClients := testCrd.DynamicClients
|
||||
name1 := "cr-instance-1"
|
||||
name2 := "cr-instance-2"
|
||||
By("Creating a v1 custom resource")
|
||||
crInstance := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/v1",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": name1,
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"hostPort": "localhost:8080",
|
||||
},
|
||||
}
|
||||
_, err := customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Now cr-instance-1 is stored as v1. lets change storage version
|
||||
crd, err = integration.UpdateCustomResourceDefinitionWithRetry(testCrd.ApiExtensionClient, crd.Name, func(c *v1beta1.CustomResourceDefinition) {
|
||||
c.Spec.Versions = alternativeApiVersions
|
||||
})
|
||||
Expect(err).To(BeNil())
|
||||
By("Create a v2 custom resource")
|
||||
crInstance = &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/v1",
|
||||
"metadata": map[string]interface{}{
|
||||
"name": name2,
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"hostPort": "localhost:8080",
|
||||
},
|
||||
}
|
||||
|
||||
// After changing a CRD, the resources for versions will be re-created that can be result in
|
||||
// cancelled connection (e.g. "grpc connection closed" or "context canceled").
|
||||
// Just retrying fixes that.
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Now that we have a v1 and v2 object, both list operation in v1 and v2 should work as expected.
|
||||
|
||||
By("List CRs in v1")
|
||||
list, err := customResourceClients["v1"].List(metav1.ListOptions{})
|
||||
Expect(err).To(BeNil())
|
||||
Expect(len(list.Items)).To(BeIdenticalTo(2))
|
||||
Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
|
||||
(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(BeTrue())
|
||||
verifyV1Object(f, crd, &list.Items[0])
|
||||
verifyV1Object(f, crd, &list.Items[1])
|
||||
|
||||
By("List CRs in v2")
|
||||
list, err = customResourceClients["v2"].List(metav1.ListOptions{})
|
||||
Expect(err).To(BeNil())
|
||||
Expect(len(list.Items)).To(BeIdenticalTo(2))
|
||||
Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
|
||||
(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(BeTrue())
|
||||
verifyV2Object(f, crd, &list.Items[0])
|
||||
verifyV2Object(f, crd, &list.Items[1])
|
||||
}
|
26
vendor/k8s.io/kubernetes/test/e2e/apimachinery/crd_watch.go
generated
vendored
@ -21,7 +21,7 @@ import (

apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -63,14 +63,14 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}

noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}

defer func() {
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
@ -80,35 +80,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)

watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)

watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)

testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)

By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
expectEvent(watchA, watch.Added, testCrA)
expectNoEvent(watchB, watch.Added, testCrA)

By("Creating second CR")
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
expectEvent(watchB, watch.Added, testCrB)
expectNoEvent(watchA, watch.Added, testCrB)

By("Deleting first CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
expectEvent(watchA, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)

By("Deleting second CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
expectEvent(watchB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
})
@ -125,7 +125,7 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (
}

func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(instanceToCreate)
createdInstance, err := client.Create(instanceToCreate, metav1.CreateOptions{})
if err != nil {
return nil, err
}
16
vendor/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go
generated
vendored
@ -19,8 +19,8 @@ package apimachinery
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"

. "github.com/onsi/ginkgo"
@ -34,9 +34,9 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {

Context("Simple CustomResourceDefinition", func() {
/*
Testname: crd-creation-test
Description: Create a random Custom Resource Definition and make sure
the API returns success.
Release : v1.9
Testname: Custom Resource Definition, create
Description: Create a API extension client, define a random custom resource definition, create the custom resource. API server MUST be able to create the custom resource.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {

@ -52,16 +52,16 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}

randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
randomDefinition := fixtures.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)

//create CRD and waits for the resource to be recognized and available.
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
randomDefinition, err = fixtures.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}

defer func() {
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
9
vendor/k8s.io/kubernetes/test/e2e/apimachinery/etcd_failure.go
generated
vendored
@ -94,8 +94,9 @@ func doEtcdFailure(failCommand, fixCommand string) {
}

func masterExec(cmd string) {
result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
host := framework.GetMasterHost() + ":22"
result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 {
framework.LogSSHResult(result)
framework.Failf("master exec command returned non-zero")
@ -120,7 +121,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
}
framework.Logf("apiserver has recovered")
return true, nil
@ -130,7 +131,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
return true, nil
273
vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go
generated
vendored
@ -27,7 +27,7 @@ import (
"k8s.io/api/extensions/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -104,7 +104,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@ -128,10 +128,6 @@ func newOwnerDeployment(f *framework.Framework, deploymentName string, labels ma
}
}

func getSelector() map[string]string {
return map[string]string{"app": "gc-test"}
}

func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {
template := getPodTemplateSpec(labels)
return &v1.ReplicationController{
@ -151,45 +147,6 @@ func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[
}
}

// verifyRemainingDeploymentsReplicaSetsPods verifies if the number
// of the remaining deployments, replica set and pods are deploymentNum,
// rsNum and podNum. It returns error if the communication with the API
// server fails.
func verifyRemainingDeploymentsReplicaSetsPods(
f *framework.Framework,
clientSet clientset.Interface,
deployment *v1beta1.Deployment,
deploymentNum, rsNum, podNum int,
) (bool, error) {
var ret = true
rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
if len(rs.Items) != rsNum {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
}
deployments, err := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list deployments: %v", err)
}
if len(deployments.Items) != deploymentNum {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
}
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %v Pods, got %d Pods", podNum, len(pods.Items)))
}

return ret, nil
}

func newGCPod(name string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -204,69 +161,77 @@ func newGCPod(name string) *v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
}
}

// verifyRemainingReplicationControllersPods verifies if the number of the remaining replication
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
}
rcs, err := rcClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
}
return ret, nil
}

// verifyRemainingCronJobsJobsPods verifies if the number of remaining cronjobs,
// jobs and pods. It returns error if the communication with the API server fails.
func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset.Interface,
cjNum, jobNum, podNum int) (bool, error) {
// verifyRemainingObjects verifies if the number of remaining objects.
// It returns error if the communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) {
var ret = true

cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != cjNum {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
}

jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list jobs: %v", err)
}
if len(jobs.Items) != jobNum {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
}

pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
for object, num := range objects {
switch object {
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
if len(pods.Items) != num {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
}
case "Deployments":
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
if len(deployments.Items) != num {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
}
case "ReplicaSets":
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
if len(rs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
}
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
}
if len(rcs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
}
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
}
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
if len(jobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
}
default:
return false, fmt.Errorf("object %s is not supported", object)
}
}

return ret, nil
@ -312,7 +277,7 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "300"},
},
},
@ -336,10 +301,9 @@ var _ = SIGDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")

/*
Testname: garbage-collector-delete-rc--propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a ReplicationController should cause pods created
by that RC to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy background
Description: Create a replication controller with 2 Pods. Once RC is created and the first Pod is created, delete RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted.
*/
framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
@ -380,7 +344,8 @@ var _ = SIGDescribe("Garbage collector", func() {
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingReplicationControllersPods(f, clientSet, 0, 0)
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
@ -394,10 +359,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})

/*
Testname: garbage-collector-delete-rc--propagation-orphan
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a ReplicationController should cause pods created
by that RC to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy orphan
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
*/
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
@ -463,6 +427,8 @@ var _ = SIGDescribe("Garbage collector", func() {
gatherMetrics(f)
})

// deleteOptions.OrphanDependents is deprecated in 1.7 and preferred to use the PropagationPolicy.
// Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote for conformance in future.
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
@ -508,10 +474,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})

/*
Testname: garbage-collector-delete-deployment-propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy background
Description: Create a deployment with a replicaset. Once replicaset is created , delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset created by the deployment and also the Pods that belong to the deployments MUST be deleted.
*/
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
clientSet := f.ClientSet
@ -547,7 +512,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 0, 0)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
errList := make([]error, 0)
@ -567,10 +533,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})

/*
Testname: garbage-collector-delete-deployment-propagation-true
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy orphan
Description: Create a deployment with a replicaset. Once replicaset is created , delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, also the Pods created by the deployments MUST be orphaned.
*/
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
clientSet := f.ClientSet
@ -606,7 +571,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
time.Sleep(30 * time.Second)
ok, err := verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects)
if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
}
@ -641,9 +607,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})

/*
Testname: garbage-collector-delete-rc-after-owned-pods
Description: Ensure that if deleteOptions.PropagationPolicy is set to Foreground,
then a ReplicationController should not be deleted until all its dependent pods are deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, after owned pods
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once RC is created and the all Pods are created, delete RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
*/
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
clientSet := f.ClientSet
@ -729,9 +695,9 @@ var _ = SIGDescribe("Garbage collector", func() {

// TODO: this should be an integration test
/*
Testname: garbage-collector-multiple-owners
Description: Ensure that if a Pod has multiple valid owners, it will not be deleted
when one of of those owners gets deleted.
Release : v1.9
Testname: Garbage Collector, multiple owners
Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create second replication controller RC2 and set RC2 as owner for half of those replicas. Once RC1 is created and the all Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. Half of the Pods that has RC2 as owner MUST not be deleted but have a deletion timestamp. Deleting the Replication Controller MUST not delete Pods that are owned by multiple replication controllers.
*/
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
clientSet := f.ClientSet
@ -771,12 +737,12 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
for i := 0; i < halfReplicas; i++ {
pod := pods.Items[i]
_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
}

By(fmt.Sprintf("delete the rc %s", rc1Name))
@ -843,40 +809,46 @@ var _ = SIGDescribe("Garbage collector", func() {

// TODO: should be an integration test
/*
Testname: garbage-collector-dependency-cycle
Description: Ensure that a dependency cycle will
not block the garbage collector.
Release : v1.9
Testname: Garbage Collector, dependency cycle
Description: Create three pods, patch them with Owner references such that pod1 has pod3, pod2 has pod1 and pod3 has pod2 as owner references respectively. Delete pod1 MUST delete all pods. The dependency cycle MUST not block the garbage collection.
*/
framework.ConformanceIt("should not be blocked by dependency circle", func() {
clientSet := f.ClientSet
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
pod1 := newGCPod("pod1")
pod1Name := "pod1"
pod1 := newGCPod(pod1Name)
pod1, err := podClient.Create(pod1)
Expect(err).NotTo(HaveOccurred())
pod2 := newGCPod("pod2")
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
pod2Name := "pod2"
pod2 := newGCPod(pod2Name)
pod2, err = podClient.Create(pod2)
Expect(err).NotTo(HaveOccurred())
pod3 := newGCPod("pod3")
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
pod3Name := "pod3"
pod3 := newGCPod(pod3Name)
pod3, err = podClient.Create(pod3)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
// create circular dependency
addRefPatch := func(name string, uid types.UID) []byte {
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
}
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, addRefPatch(pod3.Name, pod3.UID))
Expect(err).NotTo(HaveOccurred())
patch1 := addRefPatch(pod3.Name, pod3.UID)
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, addRefPatch(pod1.Name, pod1.UID))
Expect(err).NotTo(HaveOccurred())
patch2 := addRefPatch(pod1.Name, pod1.UID)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, addRefPatch(pod2.Name, pod2.UID))
Expect(err).NotTo(HaveOccurred())
patch3 := addRefPatch(pod2.Name, pod2.UID)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
// delete one pod, should result in the deletion of all pods
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
var pods *v1.PodList
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
@ -939,7 +911,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@ -964,7 +936,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedDependent, err := resourceClient.Create(dependent)
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@ -1040,7 +1012,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@ -1065,7 +1037,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
_, err = resourceClient.Create(dependent)
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@ -1107,7 +1079,7 @@ var _ = SIGDescribe("Garbage collector", func() {
By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?")
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)

By("Wait for the CronJob to create new Job")
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
@ -1127,7 +1099,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingCronJobsJobsPods(f, f.ClientSet, 0, 0, 0)
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)
6
vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go
generated
vendored
@ -49,7 +49,7 @@ func stagingClientPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
@ -70,7 +70,7 @@ func testingPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -243,7 +243,7 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
70
vendor/k8s.io/kubernetes/test/e2e/apimachinery/initializers.go
generated
vendored
@ -52,8 +52,9 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {

ch := make(chan struct{})
go func() {
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
Expect(err).NotTo(HaveOccurred())
pod := newUninitializedPod(podName)
_, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
close(ch)
}()

@ -72,34 +73,35 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {

// verify that we can update an initializing pod
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
pod.Annotations = map[string]string{"update-1": "test"}
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)

// verify the list call filters out uninitialized pods
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
Expect(err).NotTo(HaveOccurred())
listOptions := metav1.ListOptions{IncludeUninitialized: true}
pods, err := c.CoreV1().Pods(ns).List(listOptions)
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, given list options: %+v", ns, listOptions)
Expect(pods.Items).To(HaveLen(1))
pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns)
Expect(pods.Items).To(HaveLen(0))

// clear initializers
pod.Initializers = nil
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)

// pod should now start running
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, pod.Namespace)

// ensure create call returns
<-ch

// verify that we cannot start the pod initializing again
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
pod.Initializers = &metav1.Initializers{
Pending: []metav1.Initializer{{Name: "Other"}},
}
@ -119,7 +121,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
// create and register an initializer
initializerName := "pod.test.e2e.kubernetes.io"
initializerConfigName := "e2e-test-initializer"
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
initializerConfig := &v1alpha1.InitializerConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
Initializers: []v1alpha1.Initializer{
{
@ -129,11 +131,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
},
},
},
})
}
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(initializerConfig)
if errors.IsNotFound(err) {
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
}
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create and register initializer with config: %+v", initializerConfig)

// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
defer cleanupInitializer(c, initializerConfigName, initializerName)
@ -145,8 +148,9 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
ch := make(chan struct{})
go func() {
defer close(ch)
_, err := c.CoreV1().Pods(ns).Create(newInitPod(podName))
Expect(err).NotTo(HaveOccurred())
pod := newInitPod(podName)
_, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
}()

// wait until the pod shows up uninitialized
@ -162,7 +166,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s from namespace: %s", podName, ns)
Expect(pod.Initializers).NotTo(BeNil())
Expect(pod.Initializers.Pending).To(HaveLen(1))
Expect(pod.Initializers.Pending[0].Name).To(Equal(initializerName))
@ -171,14 +175,14 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
By("Completing initialization")
pod.Initializers = nil
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)

// ensure create call returns
<-ch

// pod should now start running
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, pod.Namespace)

// bypass initialization by explicitly passing an empty pending list
By("Setting an empty initializer as an admin to bypass initialization")
@ -186,7 +190,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
pod = newUninitializedPod(podName)
pod.Initializers.Pending = nil
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
Expect(pod.Initializers).To(BeNil())

// bypass initialization for mirror pods
@ -198,7 +202,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
}
pod.Spec.NodeName = "node-does-not-yet-exist"
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
Expect(pod.Initializers).To(BeNil())
Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true"))
})
@ -213,7 +217,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
// create and register an initializer, without setting up a controller to handle it.
initializerName := "pod.test.e2e.kubernetes.io"
initializerConfigName := "e2e-test-initializer"
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
initializerConfig := &v1alpha1.InitializerConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
Initializers: []v1alpha1.Initializer{
{
@ -223,11 +227,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
},
},
},
})
}
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(initializerConfig)
if errors.IsNotFound(err) {
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
}
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create and register initializer with config: %+v", initializerConfig)

// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
defer cleanupInitializer(c, initializerConfigName, initializerName)
@ -236,31 +241,32 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
time.Sleep(3 * time.Second)

// create a replicaset
persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newReplicaset())
Expect(err).NotTo(HaveOccurred())
rs := newReplicaset()
persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred(), "failed to create replicaset %s in namespace: %s", persistedRS.Name, ns)
// wait for replicaset controller to confirm that it has handled the creation
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "replicaset %s failed to observe generation: %d", persistedRS.Name, persistedRS.Generation)

// update the replicaset spec to trigger a resync
patch := []byte(`{"spec":{"minReadySeconds":5}}`)
persistedRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Patch(persistedRS.Name, types.StrategicMergePatchType, patch)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to apply to replicaset %s in namespace %s a strategic merge patch: %s", persistedRS.Name, ns, patch)

// wait for replicaset controller to confirm that it has handle the spec update
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "replicaset %s failed to observe generation: %d", persistedRS.Name, persistedRS.Generation)

// verify that the replicaset controller doesn't create extra pod
selector, err := metav1.LabelSelectorAsSelector(persistedRS.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to convert label selector %+v of LabelSelector api type into a struct that implements labels.Selector", persistedRS.Spec.Selector)

listOptions := metav1.ListOptions{
LabelSelector: selector.String(),
IncludeUninitialized: true,
}
pods, err := c.CoreV1().Pods(ns).List(listOptions)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, given list options: %+v", ns, listOptions)
Expect(len(pods.Items)).Should(Equal(1))
})

@ -277,13 +283,13 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
framework.Failf("expect err to be timeout error, got %v", err)
}
uninitializedPod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
Expect(uninitializedPod.Initializers).NotTo(BeNil())
Expect(len(uninitializedPod.Initializers.Pending)).Should(Equal(1))

patch := fmt.Sprintf(`{"metadata":{"initializers":{"pending":[{"$patch":"delete","name":"%s"}]}}}`, uninitializedPod.Initializers.Pending[0].Name)
patchedPod, err := c.CoreV1().Pods(ns).Patch(uninitializedPod.Name, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s a strategic merge patch: %s", uninitializedPod.Name, ns, patch)
Expect(patchedPod.Initializers).To(BeNil())
})
})
63
vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go
generated
vendored
@ -45,8 +45,9 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
go func(n int) {
defer wg.Done()
defer GinkgoRecover()
_, err = f.CreateNamespace(fmt.Sprintf("nslifetest-%v", n), nil)
Expect(err).NotTo(HaveOccurred())
ns := fmt.Sprintf("nslifetest-%v", n)
_, err = f.CreateNamespace(ns, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", ns)
}(n)
}
wg.Wait()
@ -54,8 +55,9 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
//Wait 10 seconds, then SEND delete requests for all the namespaces.
By("Waiting 10 seconds")
time.Sleep(time.Duration(10 * time.Second))
deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred())
deleteFilter := []string{"nslifetest"}
deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
Expect(len(deleted)).To(Equal(totalNS))

By("Waiting for namespaces to vanish")
@ -93,23 +95,25 @@ func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
return pod
}

func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
By("Creating a test namespace")
namespace, err := f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

By("Creating a pod in the namespace")
podName := "test-pod"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@ -121,7 +125,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)

By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
@ -150,7 +154,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {

By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)

By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
@ -164,26 +168,27 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}))

By("Recreating the namespace")
namespace, err = f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespace, err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", podB.Name, namespace.Name)
}

func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
var err error

By("Creating a test namespace")
namespace, err := f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)

By("Creating a service in the namespace")
serviceName := "test-service"
@ -204,11 +209,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)

By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)

By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
@ -222,12 +227,12 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}))

By("Recreating the namespace")
namespace, err = f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespace, err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)

By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to get service %s in namespace: %s", service.Name, namespace.Name)
}

// This test must run [Serial] due to the impact of running other parallel
@ -262,10 +267,18 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {

f := framework.NewDefaultFramework("namespaces")

It("should ensure that all pods are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-pods
Description: Ensure that if a namespace is deleted then all pods are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })

It("should ensure that all services are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-services
Description: Ensure that if a namespace is deleted then all services are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })

It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
23
vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go
generated
vendored
@ -18,6 +18,7 @@ package apimachinery

import (
"bytes"
"context"
"fmt"
"text/tabwriter"

@ -31,8 +32,8 @@ import (
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/client-go/util/workqueue"

utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/printers"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -54,11 +55,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
framework.Logf("Creating pod %s", podName)

_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)

table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
framework.Logf("Table: %#v", table)

Expect(len(table.ColumnDefinitions)).To(BeNumerically(">", 2))
@ -79,7 +80,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
client := c.CoreV1().PodTemplates(ns)

By("creating a large number of resources")
workqueue.Parallelize(5, 20, func(i int) {
workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
@ -106,11 +107,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred())
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(pagedTable.Rows) > 2 {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
Expect(len(pagedTable.Rows)).To(Equal(2))
Expect(pagedTable.ResourceVersion).ToNot(Equal(""))
Expect(pagedTable.SelfLink).ToNot(Equal(""))
@ -122,7 +119,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0002"))
})
@ -132,7 +129,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {

table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get nodes in Table form across all namespaces")
framework.Logf("Table: %#v", table)

Expect(len(table.ColumnDefinitions)).To(BeNumerically(">=", 2))
@ -160,7 +157,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
},
}
err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Body(sar).Do().Into(table)
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
})
})
@ -169,7 +166,7 @@ func printTable(table *metav1beta1.Table) string {
buf := &bytes.Buffer{}
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
err := printers.PrintTable(table, tw, printers.PrintOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to print table: %+v", table)
tw.Flush()
return buf.String()
}
174
vendor/k8s.io/kubernetes/test/e2e/apimachinery/watch.go
generated
vendored
@ -17,6 +17,8 @@ limitations under the License.
package apimachinery

import (
"fmt"
"math/rand"
"time"

"k8s.io/api/core/v1"
@ -55,15 +57,15 @@ var _ = SIGDescribe("Watchers", func() {

By("creating a watch on configmaps with label A")
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)

By("creating a watch on configmaps with label B")
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)

By("creating a watch on configmaps with label A or B")
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)

testConfigMapA := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@ -84,7 +86,7 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
|
||||
By("creating a configmap with label A and ensuring the correct watchers observe the notification")
|
||||
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
|
||||
expectEvent(watchA, watch.Added, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Added, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Added, testConfigMapA)
|
||||
@ -93,7 +95,7 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
expectEvent(watchA, watch.Modified, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Modified, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Modified, testConfigMapA)
|
||||
@ -102,28 +104,28 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
expectEvent(watchA, watch.Modified, testConfigMapA)
|
||||
expectEvent(watchAB, watch.Modified, testConfigMapA)
|
||||
expectNoEvent(watchB, watch.Modified, testConfigMapA)
|
||||
|
||||
By("deleting configmap A and ensuring the correct watchers observe the notification")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
|
||||
expectEvent(watchA, watch.Deleted, nil)
|
||||
expectEvent(watchAB, watch.Deleted, nil)
|
||||
expectNoEvent(watchB, watch.Deleted, nil)
|
||||
|
||||
By("creating a configmap with label B and ensuring the correct watchers observe the notification")
|
||||
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
|
||||
expectEvent(watchB, watch.Added, testConfigMapB)
|
||||
expectEvent(watchAB, watch.Added, testConfigMapB)
|
||||
expectNoEvent(watchA, watch.Added, testConfigMapB)
|
||||
|
||||
By("deleting configmap B and ensuring the correct watchers observe the notification")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
|
||||
expectEvent(watchB, watch.Deleted, nil)
|
||||
expectEvent(watchAB, watch.Deleted, nil)
|
||||
expectNoEvent(watchA, watch.Deleted, nil)
|
||||
@ -149,27 +151,27 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
|
||||
By("creating a new configmap")
|
||||
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
|
||||
By("modifying the configmap once")
|
||||
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
|
||||
By("modifying the configmap a second time")
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
|
||||
|
||||
By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
|
||||
|
||||
By("creating a watch on configmaps from the resource version returned by the first update")
|
||||
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
|
||||
|
||||
By("Expecting to observe notifications for all changes to the configmap after the first update")
|
||||
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
|
||||
@ -186,9 +188,10 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
configMapName := "e2e-watch-test-watch-closed"
|
||||
testConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-watch-closed",
|
||||
Name: configMapName,
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: watchRestartedLabelValue,
|
||||
},
|
||||
@ -197,17 +200,17 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
|
||||
By("creating a watch on configmaps")
|
||||
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
|
||||
|
||||
By("creating a new configmap")
|
||||
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("modifying the configmap once")
|
||||
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("closing the watch once it receives two notifications")
|
||||
expectEvent(testWatchBroken, watch.Added, testConfigMap)
|
||||
@ -221,7 +224,7 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
|
||||
|
||||
By("creating a new watch on configmaps from the last resource version observed by the first watch")
|
||||
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
|
||||
@ -229,11 +232,11 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
framework.Failf("Expected last notfication to refer to a configmap but got: %v", lastEvent)
|
||||
}
|
||||
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
|
||||
|
||||
By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
|
||||
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
|
||||
@ -250,9 +253,10 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
configMapName := "e2e-watch-test-label-changed"
|
||||
testConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "e2e-watch-test-label-changed",
|
||||
Name: configMapName,
|
||||
Labels: map[string]string{
|
||||
watchConfigMapLabelKey: toBeChangedLabelValue,
|
||||
},
|
||||
@ -261,23 +265,23 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
|
||||
By("creating a watch on configmaps with a certain label")
|
||||
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
|
||||
|
||||
By("creating a new configmap")
|
||||
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("modifying the configmap once")
|
||||
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "1")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("changing the label value of the configmap")
|
||||
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)
|
||||
|
||||
By("Expecting to observe a delete notification for the watched object")
|
||||
expectEvent(testWatch, watch.Added, testConfigMap)
|
||||
@ -288,7 +292,7 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "2")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
|
||||
|
||||
By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
|
||||
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
|
||||
@ -297,23 +301,66 @@ var _ = SIGDescribe("Watchers", func() {
|
||||
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)
|
||||
|
||||
By("modifying the configmap a third time")
|
||||
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
|
||||
setConfigMapData(cm, "mutation", "3")
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)
|
||||
|
||||
By("deleting the configmap")
|
||||
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
|
||||
|
||||
By("Expecting to observe an add notification for the watched object when the label value was restored")
|
||||
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
|
||||
expectEvent(testWatch, watch.Modified, testConfigMapThirdUpdate)
|
||||
expectEvent(testWatch, watch.Deleted, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: watch-consistency
|
||||
Description: Ensure that concurrent watches are consistent with each other by initiating an additional watch
|
||||
for events received from the first watch, initiated at the resource version of the event, and checking that all
|
||||
resource versions of all events match. Events are produced from writes on a background goroutine.
|
||||
*/
|
||||
It("should receive events on concurrent watches in same order", func() {
|
||||
c := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
iterations := 100
|
||||
|
||||
By("starting a background goroutine to produce watch events")
|
||||
donec := make(chan struct{})
|
||||
stopc := make(chan struct{})
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
defer close(donec)
|
||||
produceConfigMapEvents(f, stopc, 5*time.Millisecond)
|
||||
}()
|
||||
|
||||
By("creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order")
|
||||
wcs := []watch.Interface{}
|
||||
resourceVersion := "0"
|
||||
for i := 0; i < iterations; i++ {
|
||||
wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
wcs = append(wcs, wc)
|
||||
resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion
|
||||
for _, wc := range wcs[1:] {
|
||||
e := waitForNextConfigMapEvent(wc)
|
||||
if resourceVersion != e.ResourceVersion {
|
||||
framework.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
close(stopc)
|
||||
for _, wc := range wcs {
|
||||
wc.Stop()
|
||||
}
|
||||
<-donec
|
||||
})
|
||||
})

func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) {
@ -381,3 +428,70 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru
}
}
}

func waitForNextConfigMapEvent(watch watch.Interface) *v1.ConfigMap {
select {
case event := <-watch.ResultChan():
if configMap, ok := event.Object.(*v1.ConfigMap); ok {
return configMap
} else {
framework.Failf("expected config map")
}
case <-time.After(10 * time.Second):
framework.Failf("timed out waiting for watch event")
}
return nil // should never happen
}

const (
createEvent = iota
updateEvent
deleteEvent
)

func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) {
c := f.ClientSet
ns := f.Namespace.Name

name := func(i int) string {
return fmt.Sprintf("cm-%d", i)
}

existing := []int{}
tc := time.NewTicker(minWaitBetweenEvents)
defer tc.Stop()
i := 0
for range tc.C {
op := rand.Intn(3)
if len(existing) == 0 {
op = createEvent
}

cm := &v1.ConfigMap{}
switch op {
case createEvent:
cm.Name = name(i)
_, err := c.CoreV1().ConfigMaps(ns).Create(cm)
Expect(err).NotTo(HaveOccurred())
existing = append(existing, i)
i += 1
case updateEvent:
idx := rand.Intn(len(existing))
cm.Name = name(existing[idx])
_, err := c.CoreV1().ConfigMaps(ns).Update(cm)
Expect(err).NotTo(HaveOccurred())
case deleteEvent:
idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
existing = append(existing[:idx], existing[idx+1:]...)
default:
framework.Failf("Unsupported event operation: %d", op)
}
select {
case <-stopc:
return
default:
}
}
}

293
vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go
generated
vendored
@ -33,10 +33,10 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -52,27 +52,32 @@ const (
roleBindingName = "webhook-auth-reader"

// The webhook configuration names should not be reused between test instances.
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
validatingWebhookForWebhooksConfigName = "e2e-test-validating-webhook-for-webhooks-config"
mutatingWebhookForWebhooksConfigName = "e2e-test-mutating-webhook-for-webhooks-config"
dummyValidatingWebhookConfigName = "e2e-test-dummy-validating-webhook-config"
dummyMutatingWebhookConfigName = "e2e-test-dummy-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"

skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
failNamespaceLabelKey = "fail-closed-webhook"
failNamespaceLabelValue = "yes"
failNamespaceName = "fail-closed-namesapce"
addedLabelKey = "added-label"
addedLabelValue = "yes"
)

var serverWebhookVersion = utilversion.MustParseSemantic("v1.8.0")
@ -117,6 +122,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
testWebhook(f)
|
||||
})
|
||||
|
||||
It("Should be able to deny attaching pod", func() {
|
||||
webhookCleanup := registerWebhookForAttachingPod(f, context)
|
||||
defer webhookCleanup()
|
||||
testAttachingPodWebhook(f)
|
||||
})
|
||||
|
||||
It("Should be able to deny custom resource creation", func() {
|
||||
testcrd, err := framework.CreateTestCRD(f)
|
||||
if err != nil {
|
||||
@ -125,7 +136,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
defer testcrd.CleanUp()
|
||||
webhookCleanup := registerWebhookForCustomResource(f, context, testcrd)
|
||||
defer webhookCleanup()
|
||||
testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClient)
|
||||
testCustomResourceWebhook(f, testcrd.Crd, testcrd.GetV1DynamicClient())
|
||||
})
|
||||
|
||||
It("Should unconditionally reject operations on fail closed webhook", func() {
|
||||
@ -146,10 +157,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
testMutatingPodWebhook(f)
|
||||
})
|
||||
|
||||
It("Should not be able to prevent deleting validating-webhook-configurations or mutating-webhook-configurations", func() {
|
||||
webhookCleanup := registerWebhookForWebhookConfigurations(f, context)
|
||||
defer webhookCleanup()
|
||||
testWebhookForWebhookConfigurations(f)
|
||||
It("Should not be able to mutate or prevent deletion of webhook configuration objects", func() {
|
||||
validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, context)
|
||||
defer validatingWebhookCleanup()
|
||||
mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, context)
|
||||
defer mutatingWebhookCleanup()
|
||||
testWebhooksForWebhookConfigurations(f)
|
||||
})
|
||||
|
||||
It("Should mutate custom resource", func() {
|
||||
@ -160,7 +173,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
defer testcrd.CleanUp()
|
||||
webhookCleanup := registerMutatingWebhookForCustomResource(f, context, testcrd)
|
||||
defer webhookCleanup()
|
||||
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClient)
|
||||
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.GetV1DynamicClient())
|
||||
})
|
||||
|
||||
It("Should deny crd creation", func() {
|
||||
@ -405,6 +418,53 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
|
||||
}
|
||||
}
|
||||
|
||||
func registerWebhookForAttachingPod(f *framework.Framework, context *certContext) func() {
|
||||
client := f.ClientSet
|
||||
By("Registering the webhook via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := attachingPodWebhookConfigName
|
||||
// A webhook that cannot talk to server, with fail-open policy
|
||||
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
|
||||
policyIgnore := v1beta1.Ignore
|
||||
failOpenHook.FailurePolicy = &policyIgnore
|
||||
|
||||
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "deny-attaching-pod.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Connect},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"pods/attach"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/pods/attach"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
return func() {
|
||||
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
|
||||
client := f.ClientSet
|
||||
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
|
||||
@ -560,7 +620,7 @@ func testWebhook(f *framework.Framework) {
|
||||
// Creating the pod, the request should be rejected
|
||||
pod := nonCompliantPod(f)
|
||||
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
|
||||
expectedErrMsg1 := "the pod contains unwanted container name"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg1) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
|
||||
@ -575,8 +635,8 @@ func testWebhook(f *framework.Framework) {
|
||||
// Creating the pod, the request should be rejected
|
||||
pod = hangingPod(f)
|
||||
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(BeNil())
|
||||
expectedTimeoutErr := "request did not complete within allowed duration"
|
||||
Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
|
||||
expectedTimeoutErr := "request did not complete within"
|
||||
if !strings.Contains(err.Error(), expectedTimeoutErr) {
|
||||
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
|
||||
}
|
||||
@ -585,7 +645,7 @@ func testWebhook(f *framework.Framework) {
|
||||
// Creating the configmap, the request should be rejected
|
||||
configmap := nonCompliantConfigMap(f)
|
||||
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err).To(HaveOccurred(), "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
|
||||
expectedErrMsg := "the configmap contains unwanted key and value"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
@ -602,7 +662,7 @@ func testWebhook(f *framework.Framework) {
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
|
||||
|
||||
By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
|
||||
toNonCompliantFn := func(cm *v1.ConfigMap) {
|
||||
@ -612,7 +672,7 @@ func testWebhook(f *framework.Framework) {
|
||||
cm.Data["webhook-e2e-test"] = "webhook-disallow"
|
||||
}
|
||||
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err).To(HaveOccurred(), "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
@ -620,7 +680,7 @@ func testWebhook(f *framework.Framework) {
|
||||
By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
|
||||
patch := nonCompliantConfigMapPatch()
|
||||
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err).To(HaveOccurred(), "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
@ -639,7 +699,26 @@ func testWebhook(f *framework.Framework) {
|
||||
By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
|
||||
configmap = nonCompliantConfigMap(f)
|
||||
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
|
||||
}
|
||||
|
||||
func testAttachingPodWebhook(f *framework.Framework) {
|
||||
By("create a pod")
|
||||
client := f.ClientSet
|
||||
pod := toBeAttachedPod(f)
|
||||
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
|
||||
err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)
|
||||
|
||||
By("'kubectl attach' the pod, should be denied by the webhook")
|
||||
timer := time.NewTimer(30 * time.Second)
|
||||
defer timer.Stop()
|
||||
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
|
||||
Expect(err).To(HaveOccurred(), "'kubectl attach' the pod, should be denied by the webhook")
|
||||
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
|
||||
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
|
||||
}
|
||||
}
|
||||
|
||||
// failingWebhook returns a webhook with rule of create configmaps,
|
||||
@ -725,22 +804,24 @@ func testFailClosedWebhook(f *framework.Framework) {
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(HaveOccurred(), "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
|
||||
if !errors.IsInternalError(err) {
|
||||
framework.Failf("expect an internal error, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func registerWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
|
||||
func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
|
||||
var err error
|
||||
client := f.ClientSet
|
||||
By("Registering a webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
|
||||
By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := webhookForWebhooksConfigName
|
||||
configName := validatingWebhookForWebhooksConfigName
|
||||
failurePolicy := v1beta1.Fail
|
||||
|
||||
// This webhook will deny all requests to Delete admissionregistration objects
|
||||
// This webhook denies all requests to Delete validating webhook configuration and
|
||||
// mutating webhook configuration objects. It should never be called, however, because
|
||||
// dynamic admission webhooks should not be called on requests involving webhook configuration objects.
|
||||
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
@ -771,7 +852,6 @@ func registerWebhookForWebhookConfigurations(f *framework.Framework, context *ce
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
@ -782,23 +862,76 @@ func registerWebhookForWebhookConfigurations(f *framework.Framework, context *ce
|
||||
}
|
||||
}
|
||||
|
||||
// This test assumes that the deletion-rejecting webhook defined in
|
||||
// registerWebhookForWebhookConfigurations is in place.
|
||||
func testWebhookForWebhookConfigurations(f *framework.Framework) {
|
||||
func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
|
||||
var err error
|
||||
client := f.ClientSet
|
||||
By("Creating a validating-webhook-configuration object")
|
||||
By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
configName := mutatingWebhookForWebhooksConfigName
|
||||
failurePolicy := v1beta1.Fail
|
||||
|
||||
// This webhook adds a label to all requests create to validating webhook configuration and
|
||||
// mutating webhook configuration objects. It should never be called, however, because
|
||||
// dynamic admission webhooks should not be called on requests involving webhook configuration objects.
|
||||
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "add-label-to-webhook-configurations.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{"admissionregistration.k8s.io"},
|
||||
APIVersions: []string{"*"},
|
||||
Resources: []string{
|
||||
"validatingwebhookconfigurations",
|
||||
"mutatingwebhookconfigurations",
|
||||
},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/add-label"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
FailurePolicy: &failurePolicy,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
return func() {
|
||||
err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
|
||||
}
|
||||
}
|
||||
|
||||
// This test assumes that the deletion-rejecting webhook defined in
|
||||
// registerValidatingWebhookForWebhookConfigurations and the webhook-config-mutating
|
||||
// webhook defined in registerMutatingWebhookForWebhookConfigurations already exist.
|
||||
func testWebhooksForWebhookConfigurations(f *framework.Framework) {
|
||||
var err error
|
||||
client := f.ClientSet
|
||||
By("Creating a dummy validating-webhook-configuration object")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
failurePolicy := v1beta1.Ignore
|
||||
|
||||
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
mutatedValidatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: removableValidatingHookName,
|
||||
Name: dummyValidatingWebhookConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "should-be-removable-validating-webhook.k8s.io",
|
||||
Name: "dummy-validating-webhook.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
// This will not match any real resources so this webhook should never be called.
|
||||
@ -824,25 +957,28 @@ func testWebhookForWebhookConfigurations(f *framework.Framework) {
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableValidatingHookName, namespace)
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace)
|
||||
if mutatedValidatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedValidatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
|
||||
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyValidatingWebhookConfigName)
|
||||
}
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
By("Deleting the validating-webhook-configuration, which should be possible to remove")
|
||||
|
||||
err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(removableValidatingHookName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableValidatingHookName, namespace)
|
||||
err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(dummyValidatingWebhookConfigName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace)
|
||||
|
||||
By("Creating a mutating-webhook-configuration object")
|
||||
By("Creating a dummy mutating-webhook-configuration object")
|
||||
|
||||
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
mutatedMutatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: removableMutatingHookName,
|
||||
Name: dummyMutatingWebhookConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "should-be-removable-mutating-webhook.k8s.io",
|
||||
Name: "dummy-mutating-webhook.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
// This will not match any real resources so this webhook should never be called.
|
||||
@ -868,15 +1004,18 @@ func testWebhookForWebhookConfigurations(f *framework.Framework) {
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableMutatingHookName, namespace)
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace)
|
||||
if mutatedMutatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedMutatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
|
||||
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyMutatingWebhookConfigName)
|
||||
}
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
By("Deleting the mutating-webhook-configuration, which should be possible to remove")
|
||||
|
||||
err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(removableMutatingHookName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableMutatingHookName, namespace)
|
||||
err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(dummyMutatingWebhookConfigName, nil)
|
||||
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace)
|
||||
}
|
||||
|
||||
func createNamespace(f *framework.Framework, ns *v1.Namespace) error {
|
||||
@ -930,6 +1069,22 @@ func hangingPod(f *framework.Framework) *v1.Pod {
|
||||
}
|
||||
}
|
||||
|
||||
func toBeAttachedPod(f *framework.Framework) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: toBeAttachedPodName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "container1",
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
|
||||
return &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1002,7 +1157,7 @@ func registerWebhookForCustomResource(f *framework.Framework, context *certConte
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
APIVersions: testcrd.GetAPIVersions(),
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
@ -1043,7 +1198,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
APIVersions: testcrd.GetAPIVersions(),
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
@ -1062,7 +1217,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{testcrd.ApiGroup},
|
||||
APIVersions: []string{testcrd.ApiVersion},
|
||||
APIVersions: testcrd.GetAPIVersions(),
|
||||
Resources: []string{testcrd.GetPluralName()},
|
||||
},
|
||||
}},
|
||||
@ -1087,12 +1242,13 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
|
||||
|
||||
func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
|
||||
By("Creating a custom resource that should be denied by the webhook")
|
||||
crInstanceName := "cr-instance-1"
|
||||
crInstance := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "cr-instance-1",
|
||||
"name": crInstanceName,
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"data": map[string]interface{}{
|
||||
@ -1100,8 +1256,8 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := customResourceClient.Create(crInstance)
|
||||
Expect(err).NotTo(BeNil())
|
||||
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
|
||||
Expect(err).To(HaveOccurred(), "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
|
||||
expectedErrMsg := "the custom resource contains unwanted data"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
@ -1110,12 +1266,13 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
|
||||
|
||||
func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
|
||||
By("Creating a custom resource that should be mutated by the webhook")
|
||||
crName := "cr-instance-1"
|
||||
cr := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "cr-instance-1",
|
||||
"name": crName,
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"data": map[string]interface{}{
|
||||
@ -1123,8 +1280,8 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
|
||||
},
|
||||
},
|
||||
}
|
||||
mutatedCR, err := customResourceClient.Create(cr)
|
||||
Expect(err).To(BeNil())
|
||||
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
|
||||
expectedCRData := map[string]interface{}{
|
||||
"mutation-start": "yes",
|
||||
"mutation-stage-1": "yes",
|
||||
@ -1186,12 +1343,18 @@ func testCRDDenyWebhook(f *framework.Framework) {
|
||||
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, "deny")
|
||||
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, "deny")
|
||||
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
|
||||
apiVersion := "v1"
|
||||
apiVersions := []apiextensionsv1beta1.CustomResourceDefinitionVersion{
|
||||
{
|
||||
Name: "v1",
|
||||
Served: true,
|
||||
Storage: true,
|
||||
},
|
||||
}
|
||||
testcrd := &framework.TestCrd{
|
||||
Name: name,
|
||||
Kind: kind,
|
||||
ApiGroup: group,
|
||||
ApiVersion: apiVersion,
|
||||
Name: name,
|
||||
Kind: kind,
|
||||
ApiGroup: group,
|
||||
Versions: apiVersions,
|
||||
}
|
||||
|
||||
// Creating a custom resource definition for use by assorted tests.
|
||||
@ -1213,8 +1376,8 @@ func testCRDDenyWebhook(f *framework.Framework) {
|
||||
},
|
||||
},
|
||||
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
|
||||
Group: testcrd.ApiGroup,
|
||||
Version: testcrd.ApiVersion,
|
||||
Group: testcrd.ApiGroup,
|
||||
Versions: testcrd.Versions,
|
||||
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
|
||||
Plural: testcrd.GetPluralName(),
|
||||
Singular: testcrd.Name,
|
||||
@ -1227,7 +1390,7 @@ func testCRDDenyWebhook(f *framework.Framework) {
|
||||
|
||||
// create CRD
|
||||
_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err).To(HaveOccurred(), "create custom resource definition %s should be denied by webhook", testcrd.GetMetaName())
|
||||
expectedErrMsg := "the crd contains unwanted label"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
|
45
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD
generated
vendored
@ -37,7 +37,28 @@ go_library(
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@ -45,27 +66,7 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/e2e/apps/OWNERS
generated
vendored
@ -5,3 +5,5 @@ approvers:
- mfojtik
reviewers:
- sig-apps-reviewers
labels:
- sig/apps

70
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go
generated
vendored
@ -34,6 +34,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
@ -179,8 +180,8 @@ var _ = SIGDescribe("CronJob", func() {
Expect(err).NotTo(HaveOccurred())

By("Ensuring no unexpected event has happened")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).NotTo(HaveOccurred())
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).To(HaveOccurred())

By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@ -213,13 +214,13 @@ var _ = SIGDescribe("CronJob", func() {
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())

By("Ensuring there are no active jobs in the cronjob")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
By("Ensuring the job is not in the cronjob active list")
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
Expect(err).NotTo(HaveOccurred())

By("Ensuring MissingJob event has occurred")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).To(HaveOccurred())
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).NotTo(HaveOccurred())

By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@ -298,7 +299,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c",
|
||||
Image: "busybox",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: "/data",
|
||||
@ -335,7 +336,7 @@ func deleteCronJob(c clientset.Interface, ns, name string) error {
|
||||
// Wait for at least given amount of active jobs.
|
||||
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
|
||||
curr, err := getCronJob(c, ns, cronJobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -349,7 +350,7 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
|
||||
// empty after the timeout.
|
||||
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1beta1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
|
||||
curr, err := getCronJob(c, ns, jobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -362,6 +363,23 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
|
||||
})
|
||||
}
|
||||
|
||||
// Wait till a given job actually goes away from the Active list for a given cronjob
|
||||
func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := getCronJob(c, ns, cronJobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, j := range curr.Status.Active {
|
||||
if j.Name == jobName {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for a job to not exist by listing jobs explicitly.
|
||||
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
@ -425,24 +443,26 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
|
||||
})
|
||||
}
|
||||
|
||||
// checkNoEventWithReason checks no events with a reason within a list has occurred
|
||||
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
|
||||
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
|
||||
}
|
||||
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error in listing events: %s", err)
|
||||
}
|
||||
for _, e := range events.Items {
|
||||
for _, reason := range reasons {
|
||||
if e.Reason == reason {
|
||||
return fmt.Errorf("Found event with reason %s: %#v", reason, e)
|
||||
// waitForEventWithReason waits for events with a reason within a list has occurred
|
||||
func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
|
||||
return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) {
|
||||
sj, err := getCronJob(c, ns, cronJobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, e := range events.Items {
|
||||
for _, reason := range reasons {
|
||||
if e.Reason == reason {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// filterNotDeletedJobs returns the job list without any jobs that are pending
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go
generated
vendored
@ -281,7 +281,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
// Requires master ssh access.
|
||||
framework.SkipUnlessProviderIs("gce", "aws")
|
||||
restarter := NewRestartConfig(
|
||||
framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)
|
||||
framework.GetMasterHost(), "kube-scheduler", ports.InsecureSchedulerPort, restartPollInterval, restartTimeout)
|
||||
|
||||
// Create pods while the scheduler is down and make sure the scheduler picks them up by
|
||||
// scaling the rc to the same size.
|
||||
|
39
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go
generated
vendored
39
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go
generated
vendored
@ -24,8 +24,7 @@ import (
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -54,6 +53,10 @@ const (
|
||||
daemonsetColorLabel = daemonsetLabelPrefix + "color"
|
||||
)
|
||||
|
||||
// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
|
||||
// node selectors labels to namespaces
|
||||
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
|
||||
|
||||
// This test must be run in serial because it assumes the Daemon Set pods will
|
||||
// always get scheduled. If we run other tests in parallel, this may not
|
||||
// happen. In the future, running in parallel may work if we have an eviction
|
||||
@ -100,7 +103,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
c = f.ClientSet
|
||||
err := clearDaemonSetNodeLabels(c)
|
||||
|
||||
updatedNS, err := updateNamespaceAnnotations(c, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
ns = updatedNS.Name
|
||||
|
||||
err = clearDaemonSetNodeLabels(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
@ -495,6 +504,26 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateNamespaceAnnotations sets node selectors related annotations on tests namespaces to empty
|
||||
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
|
||||
nsClient := c.CoreV1().Namespaces()
|
||||
|
||||
ns, err := nsClient.Get(nsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ns.Annotations == nil {
|
||||
ns.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
for _, n := range NamespaceNodeSelectors {
|
||||
ns.Annotations[n] = ""
|
||||
}
|
||||
|
||||
return nsClient.Update(ns)
|
||||
}
|
||||
|
||||
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
|
||||
nodeClient := c.CoreV1().Nodes()
|
||||
var newNode *v1.Node
|
||||
@ -520,7 +549,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
|
||||
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
|
||||
return true, err
|
||||
}
|
||||
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
framework.Logf("failed to update node due to resource version conflict")
|
||||
return false, nil
|
||||
}
|
||||
@ -734,7 +763,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
|
||||
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
|
||||
|
37
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
37
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
@ -38,9 +38,9 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutil "k8s.io/kubernetes/test/utils"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -70,16 +70,35 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
It("deployment reaping should cascade to its replica sets and pods", func() {
|
||||
testDeleteDeployment(f)
|
||||
})
|
||||
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
|
||||
/*
|
||||
Testname: Deployment RollingUpdate
|
||||
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
|
||||
*/
|
||||
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
|
||||
testRollingUpdateDeployment(f)
|
||||
})
|
||||
It("RecreateDeployment should delete old pods and create new ones", func() {
|
||||
/*
|
||||
Testname: Deployment Recreate
|
||||
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
|
||||
*/
|
||||
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
|
||||
testRecreateDeployment(f)
|
||||
})
|
||||
It("deployment should delete old replica sets", func() {
|
||||
/*
|
||||
Testname: Deployment RevisionHistoryLimit
|
||||
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
|
||||
the Deployment's `.spec.revisionHistoryLimit`.
|
||||
*/
|
||||
framework.ConformanceIt("deployment should delete old replica sets", func() {
|
||||
testDeploymentCleanUpPolicy(f)
|
||||
})
|
||||
It("deployment should support rollover", func() {
|
||||
/*
|
||||
Testname: Deployment Rollover
|
||||
Description: A conformant Kubernetes distribution MUST support Deployment rollover,
|
||||
i.e. allow arbitrary number of changes to desired state during rolling update
|
||||
before the rollout finishes.
|
||||
*/
|
||||
framework.ConformanceIt("deployment should support rollover", func() {
|
||||
testRolloverDeployment(f)
|
||||
})
|
||||
It("deployment should support rollback", func() {
|
||||
@ -91,7 +110,13 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
|
||||
testDeploymentsControllerRef(f)
|
||||
})
|
||||
It("deployment should support proportional scaling", func() {
|
||||
/*
|
||||
Testname: Deployment Proportional Scaling
|
||||
Description: A conformant Kubernetes distribution MUST support Deployment
|
||||
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
|
||||
when a Deployment is scaled.
|
||||
*/
|
||||
framework.ConformanceIt("deployment should support proportional scaling", func() {
|
||||
testProportionalScalingDeployment(f)
|
||||
})
|
||||
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
|
||||
|
6
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
|
||||
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
|
29
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
29
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
f := framework.NewDefaultFramework("network-partition")
|
||||
var c clientset.Interface
|
||||
var ns string
|
||||
ignoreLabels := framework.ImagePullerLabels
|
||||
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
|
||||
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
|
||||
@ -197,11 +196,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
host := framework.GetNodeExternalIP(&node)
|
||||
master := framework.GetMasterAddress(c)
|
||||
host, err := framework.GetNodeExternalIP(&node)
|
||||
framework.ExpectNoError(err)
|
||||
masterAddresses := framework.GetAllMasterAddresses(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
framework.UnblockNetwork(host, master)
|
||||
for _, masterAddress := range masterAddresses {
|
||||
framework.UnblockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
@ -214,7 +216,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
}
|
||||
}()
|
||||
|
||||
framework.BlockNetwork(host, master)
|
||||
for _, masterAddress := range masterAddresses {
|
||||
framework.BlockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
@ -574,11 +578,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
host := framework.GetNodeExternalIP(&node)
|
||||
master := framework.GetMasterAddress(c)
|
||||
host, err := framework.GetNodeExternalIP(&node)
|
||||
framework.ExpectNoError(err)
|
||||
masterAddresses := framework.GetAllMasterAddresses(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
framework.UnblockNetwork(host, master)
|
||||
for _, masterAddress := range masterAddresses {
|
||||
framework.UnblockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
@ -588,7 +595,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
expectNodeReadiness(true, newNode)
|
||||
}()
|
||||
|
||||
framework.BlockNetwork(host, master)
|
||||
for _, masterAddress := range masterAddresses {
|
||||
framework.BlockNetwork(host, masterAddress)
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
|
14
vendor/k8s.io/kubernetes/test/e2e/apps/rc.go
generated
vendored
14
vendor/k8s.io/kubernetes/test/e2e/apps/rc.go
generated
vendored
@ -60,11 +60,21 @@ var _ = SIGDescribe("ReplicationController", func() {
|
||||
testReplicationControllerConditionCheck(f)
|
||||
})
|
||||
|
||||
It("should adopt matching pods on creation", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Replication Controller, adopt matching pods
|
||||
Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
|
||||
*/
|
||||
framework.ConformanceIt("should adopt matching pods on creation", func() {
|
||||
testRCAdoptMatchingOrphans(f)
|
||||
})
|
||||
|
||||
It("should release no longer matching pods", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Replication Controller, release pods
|
||||
Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
|
||||
*/
|
||||
framework.ConformanceIt("should release no longer matching pods", func() {
|
||||
testRCReleaseControlledNotMatching(f)
|
||||
})
|
||||
})
|
||||
|
7
vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go
generated
vendored
7
vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go
generated
vendored
@ -103,7 +103,12 @@ var _ = SIGDescribe("ReplicaSet", func() {
|
||||
testReplicaSetConditionCheck(f)
|
||||
})
|
||||
|
||||
It("should adopt matching pods on creation and release no longer matching pods", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Replica Set, adopt matching pods and release non matching pods
|
||||
Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references
|
||||
*/
|
||||
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() {
|
||||
testRSAdoptMatchingAndReleaseNotMatching(f)
|
||||
})
|
||||
})
|
||||
|
259
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
259
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package apps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
@ -31,6 +32,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
@ -111,7 +113,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
By("Verifying statefulset set proper service name")
|
||||
framework.ExpectNoError(sst.CheckServiceName(ss, headlessSvcName))
|
||||
|
||||
cmd := "echo $(hostname) > /data/hostname; sync;"
|
||||
cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
|
||||
By("Running " + cmd + " in all stateful pods")
|
||||
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
|
||||
|
||||
@ -248,6 +250,14 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
})
|
||||
|
||||
// This can't be Conformance yet because it depends on a default
|
||||
// StorageClass and a dynamic provisioner.
|
||||
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
|
||||
By("Creating a new StatefulSet with PVCs")
|
||||
*(ss.Spec.Replicas) = 3
|
||||
rollbackTest(c, ns, ss)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: StatefulSet, Rolling Update
|
||||
@ -256,116 +266,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
|
||||
By("Creating a new StatefulSet")
|
||||
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHttpProbe(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).To(Equal(updateRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
|
||||
ss.Namespace, ss.Name, updateRevision, currentRevision))
|
||||
pods := sst.GetPodList(ss)
|
||||
for i := range pods.Items {
|
||||
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
|
||||
sst.SortStatefulPods(pods)
|
||||
sst.BreakPodHttpProbe(ss, &pods.Items[1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
|
||||
newImage := NewNginxImage
|
||||
oldImage := ss.Spec.Template.Spec.Containers[0].Image
|
||||
|
||||
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
|
||||
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a new revision")
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).NotTo(Equal(updateRevision),
|
||||
"Current revision should not equal update revision during rolling update")
|
||||
|
||||
By("Updating Pods in reverse ordinal order")
|
||||
pods = sst.GetPodList(ss)
|
||||
sst.SortStatefulPods(pods)
|
||||
sst.RestorePodHttpProbe(ss, &pods.Items[1])
|
||||
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
|
||||
ss, pods = sst.WaitForRollingUpdate(ss)
|
||||
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
|
||||
ss.Namespace,
|
||||
ss.Name,
|
||||
ss.Status.CurrentRevision,
|
||||
updateRevision))
|
||||
for i := range pods.Items {
|
||||
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
|
||||
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
|
||||
|
||||
By("Rolling back to a previous revision")
|
||||
sst.BreakPodHttpProbe(ss, &pods.Items[1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
|
||||
priorRevision := currentRevision
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = oldImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).NotTo(Equal(updateRevision),
|
||||
"Current revision should not equal update revision during roll back")
|
||||
Expect(priorRevision).To(Equal(updateRevision),
|
||||
"Prior revision should equal update revision during roll back")
|
||||
|
||||
By("Rolling back update in reverse ordinal order")
|
||||
pods = sst.GetPodList(ss)
|
||||
sst.SortStatefulPods(pods)
|
||||
sst.RestorePodHttpProbe(ss, &pods.Items[1])
|
||||
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
|
||||
ss, pods = sst.WaitForRollingUpdate(ss)
|
||||
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
|
||||
ss.Namespace,
|
||||
ss.Name,
|
||||
ss.Status.CurrentRevision,
|
||||
updateRevision))
|
||||
|
||||
for i := range pods.Items {
|
||||
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
|
||||
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
priorRevision))
|
||||
}
|
||||
rollbackTest(c, ns, ss)
|
||||
})
|
||||
|
||||
/*
|
||||
@ -700,7 +601,9 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
|
||||
By("Verifying that stateful set " + ssName + " was scaled up in order")
|
||||
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
|
||||
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
|
||||
if event.Type != watch.Added {
|
||||
return false, nil
|
||||
}
|
||||
@ -731,7 +634,9 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
|
||||
By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
|
||||
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
|
||||
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
|
||||
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
|
||||
if event.Type != watch.Deleted {
|
||||
return false, nil
|
||||
}
|
||||
@ -810,7 +715,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Image: imageutils.GetE2EImage(imageutils.Nginx),
|
||||
Ports: []v1.ContainerPort{conflictingPort},
|
||||
},
|
||||
},
|
||||
@ -837,8 +742,10 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
|
||||
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
|
||||
framework.ExpectNoError(err)
|
||||
// we need to get UID from pod in any state and wait until stateful set controller will remove pod atleast once
|
||||
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
|
||||
defer cancel()
|
||||
// we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
|
||||
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
|
||||
pod := event.Object.(*v1.Pod)
|
||||
switch event.Type {
|
||||
case watch.Deleted:
|
||||
@ -862,7 +769,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
|
||||
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
|
||||
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
|
||||
Eventually(func() error {
|
||||
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -1076,7 +983,7 @@ func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
|
||||
func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
|
||||
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
|
||||
for k, v := range kv {
|
||||
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
|
||||
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
|
||||
framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
|
||||
}
|
||||
}
|
||||
@ -1176,3 +1083,119 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// This function is used by two tests to test StatefulSet rollbacks: one using
|
||||
// PVCs and one using no storage.
|
||||
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
|
||||
sst := framework.NewStatefulSetTester(c)
|
||||
sst.SetHttpProbe(ss)
|
||||
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).To(Equal(updateRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
|
||||
ss.Namespace, ss.Name, updateRevision, currentRevision))
|
||||
pods := sst.GetPodList(ss)
|
||||
for i := range pods.Items {
|
||||
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
currentRevision))
|
||||
}
|
||||
sst.SortStatefulPods(pods)
|
||||
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
|
||||
newImage := NewNginxImage
|
||||
oldImage := ss.Spec.Template.Spec.Containers[0].Image
|
||||
|
||||
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
|
||||
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a new revision")
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).NotTo(Equal(updateRevision),
|
||||
"Current revision should not equal update revision during rolling update")
|
||||
|
||||
By("Updating Pods in reverse ordinal order")
|
||||
pods = sst.GetPodList(ss)
|
||||
sst.SortStatefulPods(pods)
|
||||
err = sst.RestorePodHttpProbe(ss, &pods.Items[1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
|
||||
ss, pods = sst.WaitForRollingUpdate(ss)
|
||||
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
|
||||
ss.Namespace,
|
||||
ss.Name,
|
||||
ss.Status.CurrentRevision,
|
||||
updateRevision))
|
||||
for i := range pods.Items {
|
||||
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
|
||||
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
newImage))
|
||||
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
updateRevision))
|
||||
}
|
||||
|
||||
By("Rolling back to a previous revision")
|
||||
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
|
||||
priorRevision := currentRevision
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = oldImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ss = sst.WaitForStatus(ss)
|
||||
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
|
||||
Expect(currentRevision).NotTo(Equal(updateRevision),
|
||||
"Current revision should not equal update revision during roll back")
|
||||
Expect(priorRevision).To(Equal(updateRevision),
|
||||
"Prior revision should equal update revision during roll back")
|
||||
|
||||
By("Rolling back update in reverse ordinal order")
|
||||
pods = sst.GetPodList(ss)
|
||||
sst.SortStatefulPods(pods)
|
||||
sst.RestorePodHttpProbe(ss, &pods.Items[1])
|
||||
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
|
||||
ss, pods = sst.WaitForRollingUpdate(ss)
|
||||
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
|
||||
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
|
||||
ss.Namespace,
|
||||
ss.Name,
|
||||
ss.Status.CurrentRevision,
|
||||
updateRevision))
|
||||
|
||||
for i := range pods.Items {
|
||||
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
|
||||
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Spec.Containers[0].Image,
|
||||
oldImage))
|
||||
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
|
||||
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
|
||||
pods.Items[i].Namespace,
|
||||
pods.Items[i].Name,
|
||||
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
|
||||
priorRevision))
|
||||
}
|
||||
}
|
||||
|
4
vendor/k8s.io/kubernetes/test/e2e/apps/types.go
generated
vendored
4
vendor/k8s.io/kubernetes/test/e2e/apps/types.go
generated
vendored
@ -31,7 +31,7 @@ var (
|
||||
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
|
||||
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
|
||||
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
|
||||
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
|
||||
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
|
||||
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
|
||||
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
|
||||
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
|
||||
)
|
||||
|
49
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD
generated
vendored
49
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD
generated
vendored
@ -12,45 +12,48 @@ go_library(
|
||||
"certificates.go",
|
||||
"framework.go",
|
||||
"metadata_concealment.go",
|
||||
"node_authn.go",
|
||||
"node_authz.go",
|
||||
"pod_security_policy.go",
|
||||
"service_accounts.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/auth",
|
||||
deps = [
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//pkg/util/pointer:go_default_library",
|
||||
"//plugin/pkg/admission/serviceaccount:go_default_library",
|
||||
"//staging/src/k8s.io/api/apps/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/batch/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
|
||||
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/rest:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/evanphx/json-patch:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/cert:go_default_library",
|
||||
"//vendor/k8s.io/utils/pointer:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/test/e2e/auth/OWNERS
generated
vendored
2
vendor/k8s.io/kubernetes/test/e2e/auth/OWNERS
generated
vendored
@ -4,11 +4,9 @@ reviewers:
|
||||
- smarterclayton
|
||||
- sttts
|
||||
- tallclair
|
||||
- ericchiang
|
||||
approvers:
|
||||
- liggitt
|
||||
- mikedanese
|
||||
- smarterclayton
|
||||
- sttts
|
||||
- tallclair
|
||||
- ericchiang
|
||||
|
943
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go
generated
vendored
943
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go
generated
vendored
File diff suppressed because it is too large
Load Diff
5
vendor/k8s.io/kubernetes/test/e2e/auth/certificates.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e/auth/certificates.go
generated
vendored
@ -91,9 +91,10 @@ var _ = SIGDescribe("Certificates API", func() {
|
||||
|
||||
framework.Logf("waiting for CSR to be signed")
|
||||
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
|
||||
csr, _ = csrs.Get(csrName, metav1.GetOptions{})
|
||||
csr, err = csrs.Get(csrName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
framework.Logf("error getting csr: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
if len(csr.Status.Certificate) == 0 {
|
||||
framework.Logf("csr not signed yet")
|
||||
|
6
vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go
generated
vendored
6
vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go
generated
vendored
@ -55,10 +55,10 @@ var _ = SIGDescribe("Metadata Concealment", func() {
|
||||
},
|
||||
}
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create job (%s:%s)", f.Namespace.Name, job.Name)
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion (%s:%s)", f.Namespace.Name, job.Name)
|
||||
})
|
||||
})
|
||||
|
108
vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go
generated
vendored
Normal file
108
vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("node-authn")
|
||||
var ns string
|
||||
var nodeIPs []string
|
||||
BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to list nodes in namespace: %s", ns)
|
||||
Expect(len(nodeList.Items)).NotTo(BeZero())
|
||||
|
||||
pickedNode := nodeList.Items[0]
|
||||
nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
|
||||
// The pods running in the cluster can see the internal addresses.
|
||||
nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
|
||||
|
||||
// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
|
||||
saName := "default"
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to retrieve service account (%s:%s)", ns, saName)
|
||||
Expect(len(sa.Secrets)).NotTo(BeZero())
|
||||
})
|
||||
|
||||
It("The kubelet's main port 10250 should reject requests with no credentials", func() {
|
||||
pod := createNodeAuthTestPod(f)
|
||||
for _, nodeIP := range nodeIPs {
|
||||
// Anonymous authentication is disabled by default
|
||||
result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
|
||||
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
|
||||
}
|
||||
})
|
||||
|
||||
It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
|
||||
By("create a new ServiceAccount for authentication")
|
||||
trueValue := true
|
||||
newSA := &v1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ns,
|
||||
Name: "node-auth-newSA",
|
||||
},
|
||||
AutomountServiceAccountToken: &trueValue,
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create service account (%s:%s)", ns, newSA.Name)
|
||||
|
||||
pod := createNodeAuthTestPod(f)
|
||||
|
||||
for _, nodeIP := range nodeIPs {
|
||||
result := framework.RunHostCmdOrDie(ns,
|
||||
pod.Name,
|
||||
fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s:%v/metrics",
|
||||
"%{http_code}",
|
||||
"cat /var/run/secrets/kubernetes.io/serviceaccount/token",
|
||||
nodeIP, ports.KubeletPort))
|
||||
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-node-authn-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "test-node-authn",
|
||||
Image: imageutils.GetE2EImage(imageutils.Hostexec),
|
||||
Command: []string{"sleep 3600"},
|
||||
}},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return f.PodClient().CreateSync(pod)
|
||||
}
|
23
vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go
generated
vendored
23
vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go
generated
vendored
@ -51,23 +51,24 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to list nodes in namespace: %s", ns)
|
||||
Expect(len(nodeList.Items)).NotTo(Equal(0))
|
||||
nodeName = nodeList.Items[0].Name
|
||||
asUser = NodeNamePrefix + nodeName
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
|
||||
saName := "default"
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
|
||||
Expect(len(sa.Secrets)).NotTo(Equal(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to retrieve service account (%s:%s)", ns, saName)
|
||||
defaultSaSecret = sa.Secrets[0].Name
|
||||
By("Creating a kubernetes client that impersonates a node")
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to load kubernetes client config")
|
||||
config.Impersonate = restclient.ImpersonationConfig{
|
||||
UserName: asUser,
|
||||
Groups: []string{NodesGroup},
|
||||
}
|
||||
c, err = clientset.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create Clientset for the given config: %+v", *config)
|
||||
|
||||
})
|
||||
It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
|
||||
@ -97,7 +98,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
|
||||
_, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
@ -114,7 +115,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create secret (%s:%s)", ns, secret.Name)
|
||||
|
||||
By("Node should not get the secret")
|
||||
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
|
||||
@ -147,10 +148,12 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create pod (%s:%s)", ns, pod.Name)
|
||||
|
||||
By("The node should able to access the secret")
|
||||
err = wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
|
||||
itv := framework.Poll
|
||||
dur := 1 * time.Minute
|
||||
err = wait.Poll(itv, dur, func() (bool, error) {
|
||||
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
|
||||
@ -158,7 +161,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get secret after trying every %v for %v (%s:%s)", itv, dur, ns, secret.Name)
|
||||
})
|
||||
|
||||
It("A node shouldn't be able to create another node", func() {
|
||||
|
183
vendor/k8s.io/kubernetes/test/e2e/auth/pod_security_policy.go
generated
vendored
183
vendor/k8s.io/kubernetes/test/e2e/auth/pod_security_policy.go
generated
vendored
@ -20,7 +20,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -32,10 +31,10 @@ import (
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
|
||||
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
|
||||
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -75,75 +74,35 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
|
||||
|
||||
It("should forbid pod creation when no PSP is available", func() {
|
||||
By("Running a restricted pod")
|
||||
_, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
|
||||
_, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
|
||||
expectForbidden(err)
|
||||
})
|
||||
|
||||
// TODO: merge tests for extensions/policy API groups when PSP will be completely moved out of the extensions
|
||||
|
||||
It("should enforce the restricted extensions.PodSecurityPolicy", func() {
|
||||
By("Creating & Binding a restricted policy for the test service account")
|
||||
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
|
||||
defer cleanup()
|
||||
|
||||
By("Running a restricted pod")
|
||||
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
|
||||
|
||||
testPrivilegedPods(f, func(pod *v1.Pod) {
|
||||
_, err := c.CoreV1().Pods(ns).Create(pod)
|
||||
expectForbidden(err)
|
||||
})
|
||||
})
|
||||
|
||||
It("should enforce the restricted policy.PodSecurityPolicy", func() {
|
||||
By("Creating & Binding a restricted policy for the test service account")
|
||||
_, cleanup := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
|
||||
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
|
||||
defer cleanup()
|
||||
|
||||
By("Running a restricted pod")
|
||||
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
|
||||
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
|
||||
|
||||
testPrivilegedPods(f, func(pod *v1.Pod) {
|
||||
testPrivilegedPods(func(pod *v1.Pod) {
|
||||
_, err := c.CoreV1().Pods(ns).Create(pod)
|
||||
expectForbidden(err)
|
||||
})
|
||||
})
|
||||
|
||||
It("should allow pods under the privileged extensions.PodSecurityPolicy", func() {
|
||||
By("Creating & Binding a privileged policy for the test service account")
|
||||
// Ensure that the permissive policy is used even in the presence of the restricted policy.
|
||||
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
|
||||
defer cleanup()
|
||||
expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
|
||||
defer cleanup()
|
||||
|
||||
testPrivilegedPods(f, func(pod *v1.Pod) {
|
||||
p, err := c.CoreV1().Pods(ns).Create(pod)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
|
||||
|
||||
// Verify expected PSP was used.
|
||||
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
Expect(found).To(BeTrue(), "PSP annotation not found")
Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP")
})
})

It("should allow pods under the privileged policy.PodSecurityPolicy", func() {
By("Creating & Binding a privileged policy for the test service account")
// Ensure that the permissive policy is used even in the presence of the restricted policy.
_, cleanup := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
expectedPSP, cleanup := createAndBindPSPInPolicy(f, privilegedPSPInPolicy("permissive"))
expectedPSP, cleanup := createAndBindPSP(f, privilegedPSP("permissive"))
defer cleanup()

testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@ -163,16 +122,16 @@ func expectForbidden(err error) {
Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
}

func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
func testPrivilegedPods(tester func(pod *v1.Pod)) {
By("Running a privileged pod", func() {
privileged := restrictedPod(f, "privileged")
privileged := restrictedPod("privileged")
privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(privileged)
})

By("Running a HostPath pod", func() {
hostpath := restrictedPod(f, "hostpath")
hostpath := restrictedPod("hostpath")
hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
Name: "hp",
MountPath: "/hp",
@ -187,26 +146,26 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
})

By("Running a HostNetwork pod", func() {
hostnet := restrictedPod(f, "hostnet")
hostnet := restrictedPod("hostnet")
hostnet.Spec.HostNetwork = true
tester(hostnet)
})

By("Running a HostPID pod", func() {
hostpid := restrictedPod(f, "hostpid")
hostpid := restrictedPod("hostpid")
hostpid.Spec.HostPID = true
tester(hostpid)
})

By("Running a HostIPC pod", func() {
hostipc := restrictedPod(f, "hostipc")
hostipc := restrictedPod("hostipc")
hostipc.Spec.HostIPC = true
tester(hostipc)
})

if common.IsAppArmorSupported() {
By("Running a custom AppArmor profile pod", func() {
aa := restrictedPod(f, "apparmor")
aa := restrictedPod("apparmor")
// Every node is expected to have the docker-default profile.
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
tester(aa)
@ -214,13 +173,13 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
}

By("Running an unconfined Seccomp pod", func() {
unconfined := restrictedPod(f, "seccomp")
unconfined := restrictedPod("seccomp")
unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
tester(unconfined)
})

By("Running a SYS_ADMIN pod", func() {
sysadmin := restrictedPod(f, "sysadmin")
sysadmin := restrictedPod("sysadmin")
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
Add: []v1.Capability{"SYS_ADMIN"},
}
@ -229,49 +188,8 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
})
}

func createAndBindPSP(f *framework.Framework, pspTemplate *extensionsv1beta1.PodSecurityPolicy) (psp *extensionsv1beta1.PodSecurityPolicy, cleanup func()) {
// Create the PodSecurityPolicy object.
psp = pspTemplate.DeepCopy()
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
ns := f.Namespace.Name
name := fmt.Sprintf("%s-%s", ns, psp.Name)
psp.Name = name
psp, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
framework.ExpectNoError(err, "Failed to create PSP")

// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{name},
Verbs: []string{"use"},
}},
})
framework.ExpectNoError(err, "Failed to create PSP role")

// Bind the role to the namespace.
framework.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: ns,
Name: "default",
})
framework.ExpectNoError(framework.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))

return psp, func() {
// Cleanup non-namespaced PSP object.
f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
}
}

// createAndBindPSPInPolicy creates a PSP in the policy API group (unlike createAndBindPSP()).
// TODO: merge these functions when PSP will be completely moved out of the extensions
func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSecurityPolicy) (psp *policy.PodSecurityPolicy, cleanup func()) {
// createAndBindPSP creates a PSP in the policy API group.
func createAndBindPSP(f *framework.Framework, pspTemplate *policy.PodSecurityPolicy) (psp *policy.PodSecurityPolicy, cleanup func()) {
// Create the PodSecurityPolicy object.
psp = pspTemplate.DeepCopy()
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
@ -311,7 +229,7 @@ func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSec
}
}

func restrictedPod(f *framework.Framework, name string) *v1.Pod {
func restrictedPod(name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -334,8 +252,7 @@ func restrictedPod(f *framework.Framework, name string) *v1.Pod {
}

// privilegedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that allows everything.
// TODO: replace by PrivilegedPSP when PSP will be completely moved out of the extensions
func privilegedPSPInPolicy(name string) *policy.PodSecurityPolicy {
func privilegedPSP(name string) *policy.PodSecurityPolicy {
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -368,8 +285,7 @@ func privilegedPSPInPolicy(name string) *policy.PodSecurityPolicy {
}

// restrictedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that is most strict.
// TODO: replace by restrictedPSP when PSP will be completely moved out of the extensions
func restrictedPSPInPolicy(name string) *policy.PodSecurityPolicy {
func restrictedPSP(name string) *policy.PodSecurityPolicy {
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -423,61 +339,6 @@ func restrictedPSPInPolicy(name string) *policy.PodSecurityPolicy {
}
}

// restrictedPSP creates a PodSecurityPolicy that is most strict.
func restrictedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
return &extensionsv1beta1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
seccomp.AllowedProfilesAnnotationKey: v1.SeccompProfileRuntimeDefault,
seccomp.DefaultProfileAnnotationKey: v1.SeccompProfileRuntimeDefault,
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
},
},
Spec: extensionsv1beta1.PodSecurityPolicySpec{
Privileged: false,
AllowPrivilegeEscalation: utilpointer.BoolPtr(false),
RequiredDropCapabilities: []v1.Capability{
"AUDIT_WRITE",
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"MKNOD",
"NET_RAW",
"SETGID",
"SETUID",
"SYS_CHROOT",
},
Volumes: []extensionsv1beta1.FSType{
extensionsv1beta1.ConfigMap,
extensionsv1beta1.EmptyDir,
extensionsv1beta1.PersistentVolumeClaim,
"projected",
extensionsv1beta1.Secret,
},
HostNetwork: false,
HostIPC: false,
HostPID: false,
RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
Rule: extensionsv1beta1.RunAsUserStrategyMustRunAsNonRoot,
},
SELinux: extensionsv1beta1.SELinuxStrategyOptions{
Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
},
SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
},
}
}

func boolPtr(b bool) *bool {
return &b
}
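A minimal sketch (not part of the vendored diff) of the PSP-authorization pattern the helpers above rely on: grant the RBAC "use" verb on one named PodSecurityPolicy to the namespace's default service account. The client, namespace, and package name here are illustrative assumptions; error handling is collapsed for brevity.

package sketch

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// grantPSPUse mirrors the Role created by createAndBindPSP above and binds it
// to the namespace's "default" service account.
func grantPSPUse(cs kubernetes.Interface, ns, pspName string) error {
	role := &rbacv1beta1.Role{
		ObjectMeta: metav1.ObjectMeta{Name: pspName},
		Rules: []rbacv1beta1.PolicyRule{{
			APIGroups:     []string{"extensions"},
			Resources:     []string{"podsecuritypolicies"},
			ResourceNames: []string{pspName},
			Verbs:         []string{"use"},
		}},
	}
	if _, err := cs.RbacV1beta1().Roles(ns).Create(role); err != nil {
		return err
	}
	binding := &rbacv1beta1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: pspName},
		RoleRef:    rbacv1beta1.RoleRef{APIGroup: rbacv1beta1.GroupName, Kind: "Role", Name: pspName},
		Subjects: []rbacv1beta1.Subject{{
			Kind:      rbacv1beta1.ServiceAccountKind,
			Namespace: ns,
			Name:      "default",
		}},
	}
	_, err := cs.RbacV1beta1().RoleBindings(ns).Create(binding)
	return err
}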
35
vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go
generated
vendored
@ -153,6 +153,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})

/*
Release: v1.9
Testname: Service Account Tokens Must AutoMount
Description: Ensure that Service Account keys are mounted into the Container. Pod
contains three containers each will read Service Account token,
root CA and default namespace respectively from the default API
Token Mount path. All these three files MUST exist and the Service
Account mount path MUST be auto mounted to the Container.
*/
framework.ConformanceIt("should mount an API token into pods ", func() {
var tokenContent string
var rootCAContent string
@ -235,7 +244,33 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})
})

/*
Release: v1.9
Testname: Service account tokens auto mount optionally
Description: Ensure that Service Account keys are mounted into the Pod only
when AutoMountServiceToken is not set to false. We test the
following scenarios here.
1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil
a) Service Account with default value,
b) Service Account is an configured AutomountServiceAccountToken set to true,
c) Service Account is an configured AutomountServiceAccountToken set to false
2. Create Pod, Pod Spec has AutomountServiceAccountToken set to true
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
3. Create Pod, Pod Spec has AutomountServiceAccountToken set to false
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false

The Containers running in these pods MUST verify that the ServiceTokenVolume path is
auto mounted only when Pod Spec has AutomountServiceAccountToken not set to false
and ServiceAccount object has AutomountServiceAccountToken not set to false, this
include test cases 1a,1b,2a,2b and 2c.
In the test cases 1c,3a,3b and 3c the ServiceTokenVolume MUST not be auto mounted.
*/
framework.ConformanceIt("should allow opting out of API token automount ", func() {

var err error
trueValue := true
falseValue := false
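The conformance description above reduces to a single precedence rule, enumerated by the cases 1a through 3c. A hedged editorial sketch of that rule, not part of the vendored file:

package sketch

// tokenAutomounted reports whether the service account token volume should be
// mounted, given the optional pod-level and ServiceAccount-level settings.
// An explicit pod-level setting always wins; otherwise the ServiceAccount's
// setting applies; if neither is set, the token is mounted.
func tokenAutomounted(podSetting, saSetting *bool) bool {
	if podSetting != nil {
		return *podSetting
	}
	if saSetting != nil {
		return *saSetting
	}
	return true
}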
40
vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD
generated
vendored
@ -19,36 +19,36 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/autoscaling",
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

38
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go
generated
vendored
@ -33,9 +33,9 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)

const (
@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
break
}
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})

It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
@ -170,7 +170,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
replicas1 := additionalNodes1 * replicasPerNode
replicas2 := additionalNodes2 * replicasPerNode

glog.Infof("cores per node: %v", coresPerNode)
klog.Infof("cores per node: %v", coresPerNode)

// saturate cluster
initialReplicas := nodeCount
@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

glog.Infof("Reserved successfully")
klog.Infof("Reserved successfully")

// configure pending pods & expected scale up #1
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas1, additionalNodes1*perNodeReservation, largeScaleUpTimeout)
@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup1()

glog.Infof("Scaled up once")
klog.Infof("Scaled up once")

// configure pending pods & expected scale up #2
rcConfig2 := reserveMemoryRCConfig(f, "extra-pod-2", replicas2, additionalNodes2*perNodeReservation, largeScaleUpTimeout)
@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup2()

glog.Infof("Scaled up twice")
klog.Infof("Scaled up twice")
})

It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
@ -327,7 +327,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun

By("Checking if the number of nodes is as expected")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
glog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
Expect(len(nodes.Items)).Should(Equal(totalNodes))
})

@ -368,26 +368,6 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun

})

func makeUnschedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}

func makeSchedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
}
return nil
}

func anyKey(input map[string]int) string {
for k := range input {
return k
@ -410,7 +390,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
} else {
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
}
glog.Infof("cluster is increased")
klog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 {
framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount))
} else {
@ -547,5 +527,5 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist

func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
glog.Infof("%s took %s", name, elapsed)
klog.Infof("%s took %s", name, elapsed)
}
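The only functional change in this file is the logger migration from glog to klog; the call sites keep the same shape. A small hedged sketch of the pattern the diff applies throughout (package name is illustrative):

package sketch

import (
	"time"

	"k8s.io/klog" // replaces "github.com/golang/glog"
)

// timeTrackExample mirrors the timeTrack helper above after the migration:
// only the import path and package qualifier change, Infof is unchanged.
func timeTrackExample(start time.Time, name string) {
	klog.Infof("%s took %s", name, time.Since(start))
}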
565
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go
generated
vendored
@ -22,6 +22,7 @@ import (
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
@ -47,9 +48,9 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)

const (
@ -82,6 +83,8 @@ const (

expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"

gpuLabel = "cloud.google.com/gke-accelerator"
)

var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
@ -112,8 +115,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quentity := node.Status.Capacity[v1.ResourceCPU]
coreCount += quentity.Value()
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
@ -129,16 +132,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
}
})

AfterEach(func() {
if framework.ProviderIs("gke") {
By("Remove changes introduced by NAP tests")
removeNAPNodePools()
disableAutoprovisioning()
}
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
@ -163,7 +161,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
break
}
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})

It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -207,107 +205,123 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })

supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
for _, gpuType := range supportedGpuTypes {
gpuType := gpuType // create new variable for each iteration step
gpuType := os.Getenv("TESTED_GPU_TYPE")

It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))

By("Schedule a pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})

It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()

By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))

By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})

It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))

By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})

It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}

const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)

installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()

By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))

By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})

It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
|
||||
func() {
|
||||
@ -335,10 +349,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
By("Expect no more scale-up to be happening after all pods are scheduled")
|
||||
status, err = getScaleUpStatus(c)
|
||||
|
||||
// wait for a while until scale-up finishes; we cannot read CA status immediately
|
||||
// after pods are scheduled as status config map is updated by CA once every loop iteration
|
||||
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
|
||||
return s.status == caNoScaleUpStatus
|
||||
}, 2*freshStatusLimit)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
if status.target != target {
|
||||
glog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
|
||||
klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
|
||||
}
|
||||
Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false))
|
||||
Expect(status.status).Should(Equal(caNoScaleUpStatus))
|
||||
@ -355,14 +375,17 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
defer deleteNodePool(extraPoolName)
|
||||
extraNodes := getPoolInitialSize(extraPoolName)
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
|
||||
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
|
||||
// We wait for nodes to become schedulable to make sure the new nodes
|
||||
// will be returned by getPoolNodes below.
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
|
||||
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
|
||||
|
||||
By("Getting memory available on new nodes, so we can account for it when creating RC")
|
||||
nodes := getPoolNodes(f, extraPoolName)
|
||||
Expect(len(nodes)).Should(Equal(extraNodes))
|
||||
extraMemMb := 0
|
||||
for _, node := range nodes {
|
||||
mem := node.Status.Capacity[v1.ResourceMemory]
|
||||
mem := node.Status.Allocatable[v1.ResourceMemory]
|
||||
extraMemMb += int((&mem).Value() / 1024 / 1024)
|
||||
}
|
||||
|
||||
@ -491,7 +514,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
|
||||
defer func() {
|
||||
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
|
||||
glog.Infof("RC and pods not using volume deleted")
|
||||
klog.Infof("RC and pods not using volume deleted")
|
||||
}()
|
||||
|
||||
By("waiting for all pods before triggering scale up")
|
||||
@ -565,14 +588,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
newNodesSet.Delete(nodes...)
|
||||
if len(newNodesSet) > 1 {
|
||||
By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
|
||||
glog.Infof("Usually only 1 new node is expected, investigating")
|
||||
glog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
|
||||
klog.Infof("Usually only 1 new node is expected, investigating")
|
||||
klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
|
||||
if output, err := exec.Command("gcloud", "compute", "instances", "list",
|
||||
"--project="+framework.TestContext.CloudConfig.ProjectID,
|
||||
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
|
||||
glog.Infof("Gcloud compute instances list: %s", output)
|
||||
klog.Infof("Gcloud compute instances list: %s", output)
|
||||
} else {
|
||||
glog.Errorf("Failed to get instances list: %v", err)
|
||||
klog.Errorf("Failed to get instances list: %v", err)
|
||||
}
|
||||
|
||||
for newNode := range newNodesSet {
|
||||
@ -580,9 +603,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
newNode,
|
||||
"--project="+framework.TestContext.CloudConfig.ProjectID,
|
||||
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
|
||||
glog.Infof("Gcloud compute instances describe: %s", output)
|
||||
klog.Infof("Gcloud compute instances describe: %s", output)
|
||||
} else {
|
||||
glog.Errorf("Failed to get instances describe: %v", err)
|
||||
klog.Errorf("Failed to get instances describe: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -597,7 +620,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
if err == nil && node != nil {
|
||||
registeredNodes.Insert(nodeName)
|
||||
} else {
|
||||
glog.Errorf("Failed to get node %v: %v", nodeName, err)
|
||||
klog.Errorf("Failed to get node %v: %v", nodeName, err)
|
||||
}
|
||||
}
|
||||
By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
|
||||
@ -858,8 +881,21 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
|
||||
}
|
||||
|
||||
// If new nodes are disconnected too soon, they'll be considered not started
|
||||
// instead of unready, and cluster won't be considered unhealthy.
|
||||
//
|
||||
// More precisely, Cluster Autoscaler compares last transition time of
|
||||
// several readiness conditions to node create time. If it's within
|
||||
// 2 minutes, it'll assume node is just starting and not unhealthy.
|
||||
//
|
||||
// Nodes become ready in less than 1 minute after being created,
|
||||
// so waiting extra 2 minutes before breaking them (which triggers
|
||||
// readiness condition transition) should be sufficient, while
|
||||
// making no assumptions about minimal node startup time.
|
||||
time.Sleep(2 * time.Minute)
|
||||
|
||||
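// Editorial sketch (an assumption, not the autoscaler's actual code): the heuristic
// described in the comment above, expressed as a predicate. A node whose readiness
// condition last changed within the grace period of its creation is treated as
// "still starting" rather than unhealthy.
//
//	const startupGracePeriod = 2 * time.Minute
//
//	func treatedAsStarting(node *v1.Node) bool {
//		for _, cond := range node.Status.Conditions {
//			if cond.Type == v1.NodeReady {
//				return cond.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < startupGracePeriod
//			}
//		}
//		return true // no readiness condition reported yet
//	}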
By("Block network connectivity to some nodes to simulate unhealthy cluster")
|
||||
nodesToBreakCount := int(math.Floor(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
|
||||
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
|
||||
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
@ -894,106 +930,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
|
||||
})
|
||||
|
||||
It("should add new node and new node pool on too big pod, scale down to 1 and scale down to 0 [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
framework.ExpectNoError(enableAutoprovisioning(""))
|
||||
By("Create first pod")
|
||||
cleanupFunc1 := ReserveMemory(f, "memory-reservation1", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
|
||||
defer func() {
|
||||
if cleanupFunc1 != nil {
|
||||
cleanupFunc1()
|
||||
}
|
||||
}()
|
||||
By("Waiting for scale up")
|
||||
// Verify that cluster size increased.
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
|
||||
By("Check if NAP group was created")
|
||||
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
|
||||
By("Create second pod")
|
||||
cleanupFunc2 := ReserveMemory(f, "memory-reservation2", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
|
||||
defer func() {
|
||||
if cleanupFunc2 != nil {
|
||||
cleanupFunc2()
|
||||
}
|
||||
}()
|
||||
By("Waiting for scale up")
|
||||
// Verify that cluster size increased.
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+2 }, defaultTimeout))
|
||||
By("Delete first pod")
|
||||
cleanupFunc1()
|
||||
cleanupFunc1 = nil
|
||||
By("Waiting for scale down to 1")
|
||||
// Verify that cluster size decreased.
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+1 }, scaleDownTimeout))
|
||||
By("Delete second pod")
|
||||
cleanupFunc2()
|
||||
cleanupFunc2 = nil
|
||||
By("Waiting for scale down to 0")
|
||||
// Verify that cluster size decreased.
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
|
||||
By("Waiting for NAP group remove")
|
||||
framework.ExpectNoError(waitTillAllNAPNodePoolsAreRemoved())
|
||||
By("Check if NAP group was removeed")
|
||||
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
|
||||
})
|
||||
|
||||
It("shouldn't add new node group if not needed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
framework.ExpectNoError(enableAutoprovisioning(""))
|
||||
By("Create pods")
|
||||
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
|
||||
cleanupFunc := ReserveMemory(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout)
|
||||
defer cleanupFunc()
|
||||
By("Waiting for scale up")
|
||||
// Verify that cluster size increased.
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
|
||||
By("Check if NAP group was created hoping id didn't happen")
|
||||
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
|
||||
})
|
||||
|
||||
It("shouldn't scale up if cores limit too low, should scale up after limit is changed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
By(fmt.Sprintf("Set core limit to %d", coreCount))
|
||||
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount)))
|
||||
// Create pod allocating 1.1 allocatable for present nodes. Bigger node will have to be created.
|
||||
cleanupFunc := ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, time.Second)
|
||||
defer cleanupFunc()
|
||||
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
|
||||
time.Sleep(scaleUpTimeout)
|
||||
// Verify that cluster size is not changed
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount }, time.Second))
|
||||
By("Change resource limits")
|
||||
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount+5)))
|
||||
By("Wait for scale up")
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
|
||||
By("Check if NAP group was created")
|
||||
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
|
||||
})
|
||||
|
||||
It("should create new node if there is no node for node selector [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
framework.ExpectNoError(enableAutoprovisioning(""))
|
||||
// Create pod allocating 0.7 allocatable for present nodes with node selector.
|
||||
cleanupFunc := ReserveMemoryWithSelector(f, "memory-reservation", 1, int(0.7*float64(memAllocatableMb)), true, scaleUpTimeout, map[string]string{"test": "test"})
|
||||
defer cleanupFunc()
|
||||
By("Waiting for scale up")
|
||||
// Verify that cluster size increased.
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
|
||||
By("Check if NAP group was created")
|
||||
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
|
||||
})
|
||||
|
||||
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
defer createPriorityClasses(f)()
|
||||
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
|
||||
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
|
||||
@ -1006,8 +943,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
|
||||
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
defer createPriorityClasses(f)()
|
||||
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
|
||||
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
|
||||
@ -1018,8 +953,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
|
||||
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
|
||||
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
defer createPriorityClasses(f)()
|
||||
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
|
||||
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
|
||||
@ -1032,8 +965,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
|
||||
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
defer createPriorityClasses(f)()
|
||||
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
|
||||
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
|
||||
@ -1045,8 +976,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
|
||||
})
|
||||
|
||||
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
|
||||
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
defer createPriorityClasses(f)()
|
||||
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
|
||||
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
|
||||
@ -1066,7 +995,7 @@ func installNvidiaDriversDaemonSet() {
|
||||
}
|
||||
|
||||
func execCmd(args ...string) *exec.Cmd {
|
||||
glog.Infof("Executing: %s", strings.Join(args, " "))
|
||||
klog.Infof("Executing: %s", strings.Join(args, " "))
|
||||
return exec.Command(args[0], args[1:]...)
|
||||
}
|
||||
|
||||
@ -1198,7 +1127,7 @@ func isRegionalCluster() bool {
|
||||
}
|
||||
|
||||
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
|
||||
glog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
|
||||
klog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
|
||||
|
||||
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
|
||||
"--enable-autoscaling",
|
||||
@ -1208,10 +1137,10 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
|
||||
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("Failed config update result: %s", output)
|
||||
klog.Errorf("Failed config update result: %s", output)
|
||||
return fmt.Errorf("Failed to enable autoscaling: %v", err)
|
||||
}
|
||||
glog.Infof("Config update result: %s", output)
|
||||
klog.Infof("Config update result: %s", output)
|
||||
|
||||
var finalErr error
|
||||
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
|
||||
@ -1225,17 +1154,17 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
|
||||
}
|
||||
|
||||
func disableAutoscaler(nodePool string, minCount, maxCount int) error {
|
||||
glog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
|
||||
klog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
|
||||
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
|
||||
"--no-enable-autoscaling",
|
||||
"--node-pool=" + nodePool}
|
||||
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("Failed config update result: %s", output)
|
||||
klog.Errorf("Failed config update result: %s", output)
|
||||
return fmt.Errorf("Failed to disable autoscaling: %v", err)
|
||||
}
|
||||
glog.Infof("Config update result: %s", output)
|
||||
klog.Infof("Config update result: %s", output)
|
||||
|
||||
var finalErr error
|
||||
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
|
||||
@ -1248,17 +1177,6 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
|
||||
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
|
||||
}
|
||||
|
||||
func isAutoprovisioningEnabled() (bool, error) {
strBody, err := getCluster("v1alpha1")
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"enableNodeAutoprovisioning\": true") {
return true, nil
}
return false, nil
}

func executeHTTPRequest(method string, url string, body string) (string, error) {
client := &http.Client{}
req, err := http.NewRequest(method, url, strings.NewReader(body))
@ -1278,133 +1196,13 @@ func executeHTTPRequest(method string, url string, body string) (string, error)
return string(respBody), nil
}

func enableAutoprovisioning(resourceLimits string) error {
By("Using API to enable autoprovisioning.")
var body string
if resourceLimits != "" {
body = fmt.Sprintf(`{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, %s}}}`, resourceLimits)
} else {
body = `{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, "resource_limits":{"name":"cpu", "minimum":0, "maximum":100}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}}}}`
}
_, err := executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), body)
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
glog.Infof("Wait for enabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if enabled {
By("Autoprovisioning enabled.")
return nil
}
glog.Infof("Waiting for enabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't enabled (timeout).")
}

func disableAutoprovisioning() error {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Using API to disable autoprovisioning.")
_, err = executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), "{\"update\": {\"desired_cluster_autoscaling\": {}}}")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
By("Wait for disabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Waiting for disabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't disabled (timeout).")
}

func getNAPNodePools() ([]string, error) {
if framework.ProviderIs("gke") {
args := []string{"container", "node-pools", "list", "--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed to get instance groups: %v", string(output))
return nil, err
}
re := regexp.MustCompile("nap.* ")
lines := re.FindAllString(string(output), -1)
for i, line := range lines {
lines[i] = line[:strings.Index(line, " ")]
}
return lines, nil
} else {
return nil, fmt.Errorf("provider does not support NAP")
}
}

func removeNAPNodePools() error {
By("Remove NAP node pools")
pools, err := getNAPNodePools()
if err != nil {
return err
}
for _, pool := range pools {
By("Remove node pool: " + pool)
suffix := fmt.Sprintf("projects/%s/zones/%s/clusters/%s/nodePools/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster,
pool)
_, err := executeHTTPRequest(http.MethodDelete, getGKEURL("v1alpha1", suffix), "")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
}
err = waitTillAllNAPNodePoolsAreRemoved()
if err != nil {
glog.Errorf(fmt.Sprintf("Couldn't remove NAP groups: %s", err.Error()))
}
return err
}

func getNAPNodePoolsNumber() int {
groups, err := getNAPNodePools()
framework.ExpectNoError(err)
return len(groups)
}

func waitTillAllNAPNodePoolsAreRemoved() error {
By("Wait till all NAP node pools are removed")
err := wait.PollImmediate(5*time.Second, defaultTimeout, func() (bool, error) {
return getNAPNodePoolsNumber() == 0, nil
})
return err
}

func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}

@ -1414,12 +1212,12 @@ func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}

func deleteNodePool(name string) {
glog.Infof("Deleting node pool %s", name)
klog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
err := wait.ExponentialBackoff(
@ -1427,10 +1225,10 @@ func deleteNodePool(name string) {
func() (bool, error) {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
klog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
return false, nil
}
glog.Infof("Node-pool deletion output: %s", output)
klog.Infof("Node-pool deletion output: %s", output)
return true, nil
})
framework.ExpectNoError(err)
@ -1456,7 +1254,7 @@ func getPoolInitialSize(poolName string) int {
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(initialNodeCount)"}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Node-pool initial size: %s", output)
klog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
Expect(len(fields)).Should(Equal(1))
@ -1504,7 +1302,7 @@ func doPut(url, content string) (string, error) {
return strBody, nil
}

func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, priorityClassName string) func() error {
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
@ -1517,12 +1315,13 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
err := framework.RunRC(*config)
if err != nil && strings.Contains(err.Error(), "Error creating replication controller") {
glog.Warningf("Failed to create memory reservation: %v", err)
klog.Warningf("Failed to create memory reservation: %v", err)
continue
}
if expectRunning {
@ -1539,19 +1338,19 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
// ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, priorityClassName)
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}

// ReserveMemoryWithSelector creates a replication controller with pods with node selector that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelector(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, "")
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}

// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, "")
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}

// WaitForClusterSizeFunc waits until the cluster size matches the given function.
|
||||
@ -1566,7 +1365,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
if err != nil {
|
||||
glog.Warningf("Failed to list nodes: %v", err)
|
||||
klog.Warningf("Failed to list nodes: %v", err)
|
||||
continue
|
||||
}
|
||||
numNodes := len(nodes.Items)
|
||||
@ -1578,10 +1377,10 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
|
||||
numReady := len(nodes.Items)
|
||||
|
||||
if numNodes == numReady+expectedUnready && sizeFunc(numNodes) {
|
||||
glog.Infof("Cluster has reached the desired size")
|
||||
klog.Infof("Cluster has reached the desired size")
|
||||
return nil
|
||||
}
|
||||
glog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
|
||||
klog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
|
||||
}
|
||||
return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
|
||||
}
|
||||
@ -1604,21 +1403,21 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface
|
||||
// Failed pods in this context generally mean that they have been
|
||||
// double scheduled onto a node, but then failed a constraint check.
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
glog.Warningf("Pod has failed: %v", pod)
|
||||
klog.Warningf("Pod has failed: %v", pod)
|
||||
}
|
||||
if !ready && pod.Status.Phase != v1.PodFailed {
|
||||
notready = append(notready, pod.Name)
|
||||
}
|
||||
}
|
||||
if len(notready) <= tolerateUnreadyCount {
|
||||
glog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
|
||||
klog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
|
||||
return nil
|
||||
}
|
||||
glog.Infof("Too many pods are not ready yet: %v", notready)
|
||||
klog.Infof("Too many pods are not ready yet: %v", notready)
|
||||
}
|
||||
glog.Info("Timeout on waiting for pods being ready")
|
||||
glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
|
||||
glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
|
||||
klog.Info("Timeout on waiting for pods being ready")
|
||||
klog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
|
||||
klog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
|
||||
|
||||
// Some pods are still not running.
|
||||
return fmt.Errorf("Too many pods are still not running: %v", notready)
|
||||
@ -1633,11 +1432,11 @@ func getAnyNode(c clientset.Interface) *v1.Node {
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String()})
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get node list: %v", err)
|
||||
klog.Errorf("Failed to get node list: %v", err)
|
||||
return nil
|
||||
}
|
||||
if len(nodes.Items) == 0 {
|
||||
glog.Errorf("No nodes")
|
||||
klog.Errorf("No nodes")
|
||||
return nil
|
||||
}
|
||||
return &nodes.Items[0]
|
||||
@ -1696,7 +1495,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
|
||||
if !errors.IsConflict(err) {
|
||||
return err
|
||||
}
|
||||
glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
|
||||
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
|
||||
}
|
||||
return fmt.Errorf("Failed to taint node in allowed number of retries")
|
||||
}
|
||||
@ -1737,12 +1536,18 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
|
||||
if !errors.IsConflict(err) {
|
||||
return err
|
||||
}
|
||||
glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
|
||||
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
|
||||
}
|
||||
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
|
||||
}
|
||||
|
||||
func scheduleGpuPod(f *framework.Framework, id string) error {
|
||||
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
|
||||
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
|
||||
return ScheduleGpuPod(f, id, "", 1)
|
||||
}
|
||||
|
||||
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
|
||||
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
|
||||
config := &testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
InternalClient: f.InternalClientset,
|
||||
@ -1751,10 +1556,14 @@ func scheduleGpuPod(f *framework.Framework, id string) error {
|
||||
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
|
||||
Image: imageutils.GetPauseImageName(),
|
||||
Replicas: 1,
|
||||
GpuLimit: 1,
|
||||
GpuLimit: gpuLimit,
|
||||
Labels: map[string]string{"requires-gpu": "yes"},
|
||||
}
|
||||
|
||||
if gpuType != "" {
|
||||
config.NodeSelector = map[string]string{gpuLabel: gpuType}
|
||||
}
|
||||
|
||||
err := framework.RunRC(*config)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1906,7 +1715,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
|
||||
if !errors.IsConflict(err) {
|
||||
return err
|
||||
}
|
||||
glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
|
||||
klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
|
||||
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1957,7 +1766,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin
|
||||
}
|
||||
resized := setMigSizes(newSizes)
|
||||
if resized {
|
||||
glog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
|
||||
klog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -2062,7 +1871,7 @@ func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) {
|
||||
}
|
||||
result.target += newTarget
|
||||
}
|
||||
glog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
|
||||
klog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
@ -2100,7 +1909,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
|
||||
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
// log error, but attempt to remove other pdbs
|
||||
glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
|
||||
klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
|
||||
finalErr = err
|
||||
}
|
||||
}
|
||||
@ -2152,12 +1961,18 @@ func createPriorityClasses(f *framework.Framework) func() {
|
||||
}
|
||||
for className, priority := range priorityClasses {
|
||||
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
|
||||
if err != nil {
|
||||
klog.Errorf("Error creating priority class: %v", err)
|
||||
}
|
||||
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
|
||||
}
|
||||
|
||||
return func() {
|
||||
for className := range priorityClasses {
|
||||
f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
|
||||
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
|
||||
if err != nil {
|
||||
klog.Errorf("Error deleting priority class: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
metricTarget := 2 * metricValue
|
||||
metricTarget := 3 * metricValue
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"target": {
|
||||
value: metricTarget,
|
||||
@ -109,7 +109,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
|
||||
initialReplicas := 2
|
||||
// metric should cause scale down
|
||||
metricValue := externalMetricValue
|
||||
metricAverageTarget := 2 * metricValue
|
||||
metricAverageTarget := 3 * metricValue
|
||||
metricTargets := map[string]externalMetricTarget{
|
||||
"target_average": {
|
||||
value: metricAverageTarget,
|
||||
|
17
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
@ -46,7 +46,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
})

SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via deployments
// CPU tests via ReplicaSets
It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
})
@ -96,13 +96,13 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu

// HPAScaleTest struct is used by the scale(...) function.
type HPAScaleTest struct {
initPods int32
totalInitialCPUUsage int32
initPods int
totalInitialCPUUsage int
perPodCPURequest int64
targetCPUUtilizationPercent int32
minPods int32
maxPods int32
firstScale int32
firstScale int
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
@ -116,13 +116,14 @@ type HPAScaleTest struct {
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)

rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
}
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
@ -157,7 +158,7 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
}
scaleTest := &HPAScaleTest{
initPods: 5,
totalInitialCPUUsage: 375,
totalInitialCPUUsage: 325,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 30,
minPods: 1,
57
vendor/k8s.io/kubernetes/test/e2e/common/BUILD
generated
vendored
@ -12,6 +12,7 @@ go_library(
"autoscaling_utils.go",
"configmap.go",
"configmap_volume.go",
"container.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
@ -21,51 +22,69 @@ go_library(
"expansion.go",
"host_path.go",
"init_container.go",
"kubelet.go",
"kubelet_etc_hosts.go",
"lifecycle_hook.go",
"networking.go",
"node_lease.go",
"pods.go",
"privileged.go",
"projected.go",
"projected_combined.go",
"projected_configmap.go",
"projected_downwardapi.go",
"projected_secret.go",
"runtime.go",
"secrets.go",
"secrets_volume.go",
"security_context.go",
"sysctl.go",
"ttlafterfinished.go",
"util.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/common",
deps = [
"//pkg/api/v1/node:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/util/slice:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

2
vendor/k8s.io/kubernetes/test/e2e/common/apparmor.go
generated
vendored
@ -117,7 +117,7 @@ done`, testCmd)
Affinity: loaderAffinity,
Containers: []api.Container{{
Name: "test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,
26
vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go
generated
vendored
@ -43,7 +43,7 @@ import (
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMillicores = 100
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80
@ -359,6 +359,10 @@ func (rc *ResourceConsumer) GetReplicas() int {
return 0
}

func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
}

func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
@ -369,13 +373,25 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
}

func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
}

func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
if replicas != desiredReplicas {
return false, fmt.Errorf("number of replicas changed unexpectedly")
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
as, err := rc.GetHpa(hpaName)
if err != nil {
framework.Logf("Error getting HPA: %s", err)
} else {
framework.Logf("HPA status: %+v", as.Status)
}
if replicas < minDesiredReplicas {
return false, fmt.Errorf("number of replicas below target")
} else if replicas > maxDesiredReplicas {
return false, fmt.Errorf("number of replicas above target")
} else {
return false, nil // Expected number of replicas found. Continue polling until timeout.
}
41
vendor/k8s.io/kubernetes/test/e2e/common/configmap.go
generated
vendored
@ -20,19 +20,21 @@ import (
"fmt"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

var _ = Describe("[sig-api-machinery] ConfigMap", func() {
var _ = Describe("[sig-node] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")

/*
Testname: configmap-in-env-field
Description: Make sure config map value can be used as an environment
variable in the container (on container.env field)
Release : v1.9
Testname: ConfigMap, from environment field
Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment.
*/
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@ -51,7 +53,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@ -78,9 +80,9 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
})

/*
Testname: configmap-envfrom-field
Description: Make sure config map value can be used as an source for
environment variables in the container (on container.envFrom field)
Release: v1.9
Testname: ConfigMap, from environment variables
Description: Create a Pod with a environment source from ConfigMap. All ConfigMap values MUST be available as environment variables in the container.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@ -99,7 +101,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
@ -121,6 +123,11 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})

It("should fail to create configMap in volume due to empty configmap key", func() {
configMap, err := newConfigMapWithEmptyKey(f)
Expect(err).To(HaveOccurred(), "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name)
})
})

func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
@ -136,3 +143,19 @@ func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
},
}
}

func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
name := "configmap-test-emptyKey-" + string(uuid.NewUUID())
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"": "value-1",
},
}

By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
}
212
vendor/k8s.io/kubernetes/test/e2e/common/configmap_volume.go
generated
vendored
@ -27,24 +27,25 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)

var _ = Describe("[sig-storage] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")

/*
Testname: configmap-nomap-simple
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with no other settings.
Release : v1.9
Testname: ConfigMap Volume, without mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})

/*
Testname: configmap-nomap-default-mode
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with defaultMode set
Release : v1.9
Testname: ConfigMap Volume, without mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of ‘0x400’
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@ -57,9 +58,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

/*
Testname: configmap-nomap-user
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod as non-root.
Release : v1.9
Testname: ConfigMap Volume, without mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
@ -70,19 +71,18 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

/*
Testname: configmap-simple-mapped
Description: Make sure config map works by mounting it to a volume with
a custom path (mapping) on the pod with no other settings and make sure
the pod actually consumes it.
Release : v1.9
Testname: ConfigMap Volume, with mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})

/*
Testname: configmap-with-item-mode-mapped
Description: Make sure config map works with an item mode (e.g. 0400)
for the config map item.
Release : v1.9
Testname: ConfigMap Volume, with mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of ‘0x400’
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
@ -90,8 +90,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

/*
Testname: configmap-simple-user-mapped
Description: Make sure config map works when it is mounted as non-root.
Release : v1.9
Testname: ConfigMap Volume, with mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
@ -102,9 +103,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

/*
Testname: configmap-update-test
Description: Make sure update operation is working on config map and
the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, update
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@ -151,7 +152,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -184,7 +185,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})

It("binary data should be reflected in volume [NodeConformance]", func() {
/*
Release: v1.12
Testname: ConfigMap Volume, text data, binary data
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))

@ -233,7 +239,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -245,7 +251,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: containerName2,
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"hexdump", "-C", "/etc/configmap-volume/dump.bin"},
VolumeMounts: []v1.VolumeMount{
{
@ -276,9 +282,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

/*
Testname: configmap-CUD-test
Description: Make sure Create, Update, Delete operations are all working
on config map and the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, create, update and delete
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in a error reading that item(file).
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@ -379,7 +385,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -391,7 +397,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@ -403,7 +409,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -459,9 +465,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

/*
Testname: configmap-multiple-volumes
Description: Make sure config map works when it mounted as two different
volumes on the same node.
Release : v1.9
Testname: ConfigMap Volume, multiple volume maps
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
@ -509,7 +515,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -534,6 +540,26 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})

})

//The pod is in pending during volume creation until the configMap objects are available
//or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})

//ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional, during the pod creation.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
})

func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
@ -589,7 +615,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
@ -675,7 +701,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
@ -718,3 +744,115 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}

func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false

createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"

//creating a pod without configMap object created, by mentioning the configMap volume source's local reference name
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Optional: &falseValue,
},
},
},
},
Containers: []v1.Container{
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}

func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false

createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"
configMap := newConfigMap(f, createName)

By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
//creating a pod with configMap object, but with different key which is not present in configMap object.
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Items: []v1.KeyToPath{
{
Key: "data-4",
Path: "path/to/data-4",
},
},
Optional: &falseValue,
},
},
},
},
Containers: []v1.Container{
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}
@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e_node
package common

import (
"fmt"
"time"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -27,6 +28,11 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)

const (
ContainerStatusRetryTimeout = time.Minute * 5
ContainerStatusPollInterval = time.Second * 1
)

// One pod one container
type ConformanceContainer struct {
Container v1.Container
57
vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go
generated
vendored
@ -50,9 +50,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-readiness-probe-initial-delay
Description: Make sure that pod with readiness probe should not be
ready before initial delay and never restart.
Release : v1.9
Testname: Pod readiness probe, with initial delay
Description: Create a Pod that is configured with a initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay.
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
@ -82,9 +82,10 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-readiness-probe-failure
Description: Make sure that pod with readiness probe that fails should
never be ready and never restart.
Release : v1.9
Testname: Pod readiness probe, failure
Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created,
then the Pod MUST never be ready, never be running and restart count MUST be zero.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
@ -107,9 +108,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-cat-liveness-probe-restarted
Description: Make sure the pod is restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, restart
Description: Create a Pod with liveness probe that uses ExecAction handler to cat /temp/health file. The Container deletes the file /temp/health after 10 second, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -121,7 +122,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -139,9 +140,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-cat-liveness-probe-not-restarted
Description: Make sure the pod is not restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, no restart
Description: Pod is created with liveness probe that uses ‘exec’ command to cat /temp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0.
*/
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -171,9 +172,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-http-liveness-probe-restarted
Description: Make sure when http liveness probe fails, the pod should
be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, restart
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1.
*/
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -205,9 +206,9 @@ var _ = framework.KubeDescribe("Probing container", func() {

// Slow by design (5 min)
/*
Testname: pods-restart-count
Description: Make sure when a pod gets restarted, its start count
should increase.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, multiple restarts (slow)
Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment everytime health check fails, measure upto 5 restart.
*/
framework.ConformanceIt("should have monotonically increasing restart count [Slow][NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -238,9 +239,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-http-liveness-probe-not-restarted
Description: Make sure when http liveness probe succeeds, the pod
should not be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, failure
Description: A Pod is created with liveness probe on http endpoint ‘/’. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero.
*/
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -252,7 +253,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -272,9 +273,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})

/*
Testname: pods-docker-liveness-probe-timeout
Description: Make sure that the pod is restarted with a docker exec
liveness probe with timeout.
Release : v1.9
Testname: Pod liveness probe, docker exec, restart
Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod.
*/
It("should be restarted with a docker exec liveness probe with timeout ", func() {
// TODO: enable this test once the default exec handler supports timeout.
@ -288,7 +289,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
28
vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go
generated
vendored
@ -28,10 +28,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
|
||||
f := framework.NewDefaultFramework("containers")
|
||||
|
||||
/*
|
||||
Testname: container-without-command-args
|
||||
Description: When a Pod is created neither 'command' nor 'args' are
|
||||
provided for a Container, ensure that the docker image's default
|
||||
command and args are used.
|
||||
Release : v1.9
|
||||
Testname: Docker containers, without command and arguments
|
||||
Description: Default command and arguments from the docker image entrypoint MUST be used when Pod does not specify the container command
|
||||
*/
|
||||
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
|
||||
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
|
||||
@ -40,10 +39,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: container-with-args
|
||||
Description: When a Pod is created and 'args' are provided for a
|
||||
Container, ensure that they take precedent to the docker image's
|
||||
default arguments, but that the default command is used.
|
||||
Release : v1.9
|
||||
Testname: Docker containers, with arguments
|
||||
Description: Default command and from the docker image entrypoint MUST be used when Pod does not specify the container command but the arguments from Pod spec MUST override when specified.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
|
||||
pod := entrypointTestPod()
|
||||
@ -57,10 +55,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
|
||||
// Note: when you override the entrypoint, the image's arguments (docker cmd)
|
||||
// are ignored.
|
||||
/*
|
||||
Testname: container-with-command
|
||||
Description: When a Pod is created and 'command' is provided for a
|
||||
Container, ensure that it takes precedent to the docker image's default
|
||||
command.
|
||||
Release : v1.9
|
||||
Testname: Docker containers, with command
|
||||
Description: Default command from the docker image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
|
||||
pod := entrypointTestPod()
|
||||
@ -72,10 +69,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: container-with-command-args
|
||||
Description: When a Pod is created and 'command' and 'args' are
|
||||
provided for a Container, ensure that they take precedent to the docker
|
||||
image's default command and arguments.
|
||||
Release : v1.9
|
||||
Testname: Docker containers, with command and arguments
|
||||
Description: Default command and arguments from the docker image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
|
||||
pod := entrypointTestPod()
|
||||
|
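Aside (not part of the vendored diff): the hunks above only reword the conformance metadata for the command/args override tests. A minimal Go sketch of the behaviour being described, assuming the core/v1 and meta/v1 packages these tests already use and a placeholder busybox image, could look like this:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// entrypointOverridePod builds a Pod whose container overrides both the
// image ENTRYPOINT (via Command) and the image CMD (via Args), mirroring
// the "command and arguments" conformance case described above.
func entrypointOverridePod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "entrypoint-override"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				{
					Name:    "entrypoint-test",
					Image:   "busybox", // placeholder image; the e2e tests resolve theirs via imageutils
					Command: []string{"/bin/sh", "-c"},            // overrides the image ENTRYPOINT
					Args:    []string{"echo override && sleep 1"}, // overrides the image CMD
				},
			},
		},
	}
}

func main() {
	pod := entrypointOverridePod()
	fmt.Printf("command=%v args=%v\n", pod.Spec.Containers[0].Command, pod.Spec.Containers[0].Args)
}

Omitting Command while keeping Args gives the "override arguments only" case; omitting both gives the "image defaults" case.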
44
vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go
generated
vendored
44
vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go
generated
vendored
@ -23,8 +23,9 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
@ -34,13 +35,13 @@ var (
|
||||
podUIDVersion = utilversion.MustParseSemantic("v1.8.0")
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
var _ = Describe("[sig-node] Downward API", func() {
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-name-namespace-podip
|
||||
Description: Ensure that downward API can provide pod's name, namespace
|
||||
and IP address as environment variables.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI, environment for name, namespace and ip
|
||||
Description: Downward API MUST expose Pod and Container fields as environment variables. The Pod name, namespace and IP specified as environment variables in the Pod spec MUST be visible at runtime in the container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
@ -84,9 +85,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-host-ip
|
||||
Description: Ensure that downward API can provide an IP address for
|
||||
host node as an environment variable.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI, environment for host ip
|
||||
Description: Downward API MUST expose Pod and Container fields as environment variables. The host IP specified as an environment variable in the Pod spec MUST be visible at runtime in the container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
|
||||
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
|
||||
@ -111,9 +112,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-limits-requests
|
||||
Description: Ensure that downward API can provide CPU/memory limit
|
||||
and CPU/memory request as environment variables.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI, environment for CPU and memory limits and requests
|
||||
Description: Downward API MUST expose CPU request and Memory request set through environment variables at runtime in the container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
@ -162,10 +163,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-default-allocatable
|
||||
Description: Ensure that downward API can provide default node
|
||||
allocatable values for CPU and memory as environment variables if CPU
|
||||
and memory limits are not specified for a container.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI, environment for default CPU and memory limits and requests
|
||||
Description: Downward API MUST expose CPU request and Memory limits set through environment variables at runtime in the container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
@ -200,7 +200,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: env,
|
||||
},
|
||||
@ -213,9 +213,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-pod-uid
|
||||
Description: Ensure that downward API can provide pod UID as an
|
||||
environment variable.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI, environment for Pod UID
|
||||
Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
|
||||
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
|
||||
@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: env,
|
||||
},
|
||||
@ -325,7 +325,7 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
@ -357,7 +357,7 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
|
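Aside (illustrative, not part of the vendored diff): the downward_api.go hunks above exercise env vars sourced from Pod fields and container resources. A minimal sketch of such an env block, assuming only the core/v1 types already imported by these tests, might be:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// downwardAPIEnv returns environment variables sourced from Pod fields and
// container resources, the kind of block the Downward API conformance
// tests above attach to their "dapi-container".
func downwardAPIEnv() []v1.EnvVar {
	return []v1.EnvVar{
		{
			Name: "POD_NAME",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		},
		{
			Name: "POD_NAMESPACE",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
			},
		},
		{
			Name: "POD_IP",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.podIP"},
			},
		},
		{
			Name: "CPU_LIMIT",
			ValueFrom: &v1.EnvVarSource{
				ResourceFieldRef: &v1.ResourceFieldSelector{Resource: "limits.cpu"},
			},
		},
	}
}

func main() {
	for _, e := range downwardAPIEnv() {
		fmt.Println(e.Name)
	}
}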
81
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go
generated
vendored
81
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -32,7 +33,7 @@ import (
|
||||
|
||||
var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
// How long to wait for a log pod to be displayed
|
||||
const podLogTimeout = 2 * time.Minute
|
||||
const podLogTimeout = 3 * time.Minute
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
@ -40,9 +41,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-podname
|
||||
Description: Ensure that downward API can provide pod's name through
|
||||
DownwardAPIVolumeFiles.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, pod name
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -54,9 +55,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-set-default-mode
|
||||
Description: Ensure that downward API can set default file permission
|
||||
mode for DownwardAPIVolumeFiles if no mode is specified.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, volume mode 0400
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource with the volume source mode set to -r-------- and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -69,9 +70,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-set-mode
|
||||
Description: Ensure that downward API can set file permission mode for
|
||||
DownwardAPIVolumeFiles.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, file mode 0400
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -113,9 +114,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-update-label
|
||||
Description: Ensure that downward API updates labels in
|
||||
DownwardAPIVolumeFiles when pod's labels get modified.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, update label
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod labels. The container runtime MUST be able to access the Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
|
||||
labels := map[string]string{}
|
||||
@ -145,9 +146,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-update-annotation
|
||||
Description: Ensure that downward API updates annotations in
|
||||
DownwardAPIVolumeFiles when pod's annotations get modified.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, update annotations
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod annotations. The container runtime MUST be able to access the Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
|
||||
annotations := map[string]string{}
|
||||
@ -179,9 +180,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-cpu-limit
|
||||
Description: Ensure that downward API can provide container's CPU limit
|
||||
through DownwardAPIVolumeFiles.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, CPU limits
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limits. The container runtime MUST be able to access the CPU limits from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -193,9 +194,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-memory-limit
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
limit through DownwardAPIVolumeFiles.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, memory limits
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limits. The container runtime MUST be able to access the memory limits from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -207,9 +208,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-cpu-request
|
||||
Description: Ensure that downward API can provide container's CPU
|
||||
request through DownwardAPIVolumeFiles.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, CPU request
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU request. The container runtime MUST be able to access the CPU request from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -221,9 +222,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-memory-request
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
request through DownwardAPIVolumeFiles.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, memory request
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory request. The container runtime MUST be able to access the memory request from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -235,10 +236,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-default-cpu
|
||||
Description: Ensure that downward API can provide default node
|
||||
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
|
||||
limit is not specified for a container.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, CPU limit, default node allocatable
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limits. The CPU limit is not specified for the container. The container runtime MUST be able to access the CPU limits from the specified path on the mounted volume, and the value MUST be the default node allocatable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -248,10 +248,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-default-memory
|
||||
Description: Ensure that downward API can provide default node
|
||||
allocatable value for memory through DownwardAPIVolumeFiles if memory
|
||||
limit is not specified for a container.
|
||||
Release : v1.9
|
||||
Testname: DownwardAPI volume, memory limit, default node allocatable
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limits. The memory limit is not specified for the container. The container runtime MUST be able to access the memory limits from the specified path on the mounted volume, and the value MUST be the default node allocatable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
@ -268,7 +267,7 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--file_mode=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
@ -294,7 +293,7 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--file_content=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
@ -325,7 +324,7 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
|
||||
return []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--file_content=" + filePath},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
@ -353,7 +352,7 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container
|
||||
return []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--file_content=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
@ -372,7 +371,7 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
|
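Aside (illustrative, not part of the vendored diff): the downwardapi_volume.go hunks above describe projecting Pod fields into files via DownwardAPIVolumeFiles with a configurable mode. A minimal sketch, assuming the core/v1 types these tests already import, could be:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// downwardAPIVolume returns a volume that projects the Pod name and labels
// into files, with a restrictive 0400 default mode, matching the cases the
// DownwardAPIVolumeFiles conformance descriptions above cover.
func downwardAPIVolume() v1.Volume {
	defaultMode := int32(0400) // -r-------- on the projected files
	return v1.Volume{
		Name: "podinfo",
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{
				DefaultMode: &defaultMode,
				Items: []v1.DownwardAPIVolumeFile{
					{
						Path:     "podname",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
					},
					{
						Path:     "labels",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels"},
					},
				},
			},
		},
	}
}

func main() {
	v := downwardAPIVolume()
	fmt.Printf("volume %q with %d projected files\n", v.Name, len(v.VolumeSource.DownwardAPI.Items))
}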
97
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go
generated
vendored
97
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go
generated
vendored
@ -67,139 +67,126 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-mode-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
|
||||
mount type.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode default
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
|
||||
*/
|
||||
framework.ConformanceIt("volume on tmpfs should have the correct mode [NodeConformance]", func() {
|
||||
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0644-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a root owned file with 0644 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0644
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,tmpfs) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0666-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a root owned file with 0666 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0666
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,tmpfs) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0777-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a root owned file with 0777 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0777
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,tmpfs) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0644-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a user owned file with 0644 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0644, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,tmpfs) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0666-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a user owned file with 0666 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0666, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,tmpfs) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0777-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a user owned file with 0777 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0777, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,tmpfs) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-mode
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure the
|
||||
volume has 0777 unix file permissions.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode default
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
|
||||
*/
|
||||
framework.ConformanceIt("volume on default medium should have the correct mode [NodeConformance]", func() {
|
||||
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0644
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
root owned file with 0644 unix file permissions is created and enforced
|
||||
correctly.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0644
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,default) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0666
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
root owned file with 0666 unix file permissions is created and enforced
|
||||
correctly.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0666
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,default) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0777
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
root owned file with 0777 unix file permissions is created and enforced
|
||||
correctly.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0777
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,default) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0644
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
user owned file with 0644 unix file permissions is created and enforced
|
||||
correctly.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0644, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,default) [NodeConformance]", func() {
|
||||
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0666
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
user owned file with 0666 unix file permissions is created and enforced
|
||||
correctly.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0666, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,default) [NodeConformance]", func() {
|
||||
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0777
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
user owned file with 0777 unix file permissions is created and enforced
|
||||
correctly.
|
||||
Release : v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0777, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,default) [NodeConformance]", func() {
|
||||
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
|
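Aside (illustrative, not part of the vendored diff): the empty_dir.go hunks above distinguish the default medium from 'medium: Memory' (tmpfs). A minimal sketch of the tmpfs variant, assuming the core/v1 and meta/v1 packages used throughout and a placeholder busybox image, might be:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// emptyDirTmpfsPod builds a Pod with an emptyDir volume backed by memory
// (tmpfs), the variant the "medium memory" conformance cases above describe.
func emptyDirTmpfsPod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "emptydir-tmpfs"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "cache",
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:         "writer",
					Image:        "busybox", // placeholder; the e2e tests pick theirs via imageutils
					Command:      []string{"/bin/sh", "-c", "mount | grep /cache && touch /cache/ok"},
					VolumeMounts: []v1.VolumeMount{{Name: "cache", MountPath: "/cache"}},
				},
			},
		},
	}
}

func main() {
	fmt.Println(emptyDirTmpfsPod().Spec.Volumes[0].VolumeSource.EmptyDir.Medium)
}

Leaving Medium unset gives the "medium default" cases; the file-mode variants only change the permissions the test writes and then verifies.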
31
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go
generated
vendored
31
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go
generated
vendored
@ -21,6 +21,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@ -33,9 +34,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
f := framework.NewDefaultFramework("var-expansion")
|
||||
|
||||
/*
|
||||
Testname: var-expansion-env
|
||||
Description: Make sure environment variables can be set using an
|
||||
expansion of previously defined environment variables
|
||||
Release : v1.9
|
||||
Testname: Environment variables, expansion
|
||||
Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values.
|
||||
*/
|
||||
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
@ -48,7 +49,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
@ -78,9 +79,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-command
|
||||
Description: Make sure a container's commands can be set using an
|
||||
expansion of environment variables.
|
||||
Release : v1.9
|
||||
Testname: Environment variables, command expansion
|
||||
Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values.
|
||||
*/
|
||||
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
@ -93,7 +94,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
@ -113,9 +114,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-arg
|
||||
Description: Make sure a container's args can be set using an
|
||||
expansion of environment variables.
|
||||
Release : v1.9
|
||||
Testname: Environment variables, command argument expansion
|
||||
Description: Create a Pod with environment variables and container command arguments using them. Container command arguments using the defined environment variables MUST expand to proper values.
|
||||
*/
|
||||
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c"},
|
||||
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
|
||||
Env: []v1.EnvVar{
|
||||
@ -164,7 +165,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "test -d /testcontainer/" + podName + ";echo $?"},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
@ -225,7 +226,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
@ -274,7 +275,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
|
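Aside (illustrative, not part of the vendored diff): the expansion.go hunks above cover $(VAR) expansion in env, command and args. A minimal sketch, assuming only the core/v1 types and a placeholder busybox image:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// expansionContainer shows the $(VAR) expansion exercised above: values
// defined earlier in Env can be referenced by later entries and by the
// container command and args.
func expansionContainer() v1.Container {
	return v1.Container{
		Name:  "expansion-test",
		Image: "busybox", // placeholder image
		Env: []v1.EnvVar{
			{Name: "GREETING", Value: "hello"},
			{Name: "MESSAGE", Value: "$(GREETING) world"}, // expands to "hello world"
		},
		Command: []string{"sh", "-c"},
		Args:    []string{"echo $(MESSAGE)"}, // expanded by the kubelet, not by the shell
	}
}

func main() {
	c := expansionContainer()
	fmt.Println(c.Env[1].Value, c.Args[0])
}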
97
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go
generated
vendored
97
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go
generated
vendored
@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
@ -40,10 +41,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-hostpath-mode
|
||||
Description: For a Pod created with a 'HostPath' Volume, ensure the
|
||||
volume is a directory with 0777 unix file permissions and that it has
|
||||
the sticky bit (mode flag t) set.
|
||||
Release : v1.9
|
||||
Testname: Host path, volume mode default
|
||||
Description: Create a Pod with a host volume mounted. The mounted volume MUST be a directory with permissions mode -rwxrwxrwx, and it MUST have the sticky bit (mode flag t) set.
|
||||
*/
|
||||
framework.ConformanceIt("should give a volume the correct mode [NodeConformance]", func() {
|
||||
source := &v1.HostPathVolumeSource{
|
||||
@ -116,91 +116,6 @@ var _ = Describe("[sig-storage] HostPath", func() {
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
|
||||
It("should support existing directory subPath", func() {
|
||||
framework.SkipUnlessSSHKeyPresent()
|
||||
|
||||
subPath := "sub-path"
|
||||
fileName := "test-file"
|
||||
retryDuration := 180
|
||||
|
||||
filePathInWriter := path.Join(volumePath, fileName)
|
||||
filePathInReader := path.Join(volumePath, subPath, fileName)
|
||||
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
pod.Spec.NodeName = nodeList.Items[0].Name
|
||||
|
||||
// Create the subPath directory on the host
|
||||
existing := path.Join(source.Path, subPath)
|
||||
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
|
||||
framework.LogSSHResult(result)
|
||||
framework.ExpectNoError(err)
|
||||
if result.Code != 0 {
|
||||
framework.Failf("mkdir returned non-zero")
|
||||
}
|
||||
|
||||
// Write the file in the subPath from container 0
|
||||
container := &pod.Spec.Containers[0]
|
||||
container.VolumeMounts[0].SubPath = subPath
|
||||
container.Args = []string{
|
||||
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
|
||||
fmt.Sprintf("--file_mode=%v", filePathInWriter),
|
||||
}
|
||||
|
||||
// Read it from outside the subPath from container 1
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
|
||||
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
|
||||
// TODO consolidate common code of this test and above
|
||||
It("should support existing single file subPath", func() {
|
||||
framework.SkipUnlessSSHKeyPresent()
|
||||
|
||||
subPath := "sub-path-test-file"
|
||||
retryDuration := 180
|
||||
|
||||
filePathInReader := path.Join(volumePath, subPath)
|
||||
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
pod.Spec.NodeName = nodeList.Items[0].Name
|
||||
|
||||
// Create the subPath file on the host
|
||||
existing := path.Join(source.Path, subPath)
|
||||
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
|
||||
framework.LogSSHResult(result)
|
||||
framework.ExpectNoError(err)
|
||||
if result.Code != 0 {
|
||||
framework.Failf("echo returned non-zero")
|
||||
}
|
||||
|
||||
// Mount the file to the subPath in container 0
|
||||
container := &pod.Spec.Containers[0]
|
||||
container.VolumeMounts[0].SubPath = subPath
|
||||
|
||||
// Read it from outside the subPath from container 1
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
|
||||
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
//These constants are borrowed from the other test.
|
||||
@ -236,7 +151,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName1,
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
@ -249,7 +164,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
|
||||
},
|
||||
{
|
||||
Name: containerName2,
|
||||
Image: mountImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
|
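Aside (illustrative, not part of the vendored diff): the host_path.go hunks above remove the SSH-based subPath tests while keeping the writer/reader pattern. A minimal sketch of that pattern, assuming the core/v1 and meta/v1 packages used throughout and a placeholder busybox image, could be:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hostPathSubPathPod mounts one hostPath volume twice: the "writer"
// container mounts only the sub-path directory, while the "reader"
// container mounts the whole volume, mirroring the writer/reader split
// used by the tests above.
func hostPathSubPathPod() *v1.Pod {
	hostPathType := v1.HostPathDirectoryOrCreate
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "hostpath-subpath"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "host-dir",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{Path: "/tmp", Type: &hostPathType},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:    "writer",
					Image:   "busybox", // placeholder image
					Command: []string{"/bin/sh", "-c", "echo data > /data/file"},
					VolumeMounts: []v1.VolumeMount{
						{Name: "host-dir", MountPath: "/data", SubPath: "sub-path"},
					},
				},
				{
					Name:    "reader",
					Image:   "busybox", // placeholder image
					Command: []string{"/bin/sh", "-c", "cat /data/sub-path/file"},
					VolumeMounts: []v1.VolumeMount{
						{Name: "host-dir", MountPath: "/data"},
					},
				},
			},
		},
	}
}

func main() {
	fmt.Println(len(hostPathSubPathPod().Spec.Containers), "containers sharing one hostPath volume")
}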
86
vendor/k8s.io/kubernetes/test/e2e/common/init_container.go
generated
vendored
86
vendor/k8s.io/kubernetes/test/e2e/common/init_container.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
@ -26,6 +27,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/client/conditions"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@ -42,7 +44,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
It("should invoke init containers on a RestartNever pod", func() {
|
||||
/*
|
||||
Release: v1.12
|
||||
Testname: init-container-starts-app-restartnever-pod
|
||||
Description: Ensure that all InitContainers are started
|
||||
and all containers in pod are voluntarily terminated with exit status 0,
|
||||
and the system is not going to restart any of these containers
|
||||
when Pod has restart policy as RestartNever.
|
||||
*/
|
||||
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -59,19 +69,19 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
@ -82,7 +92,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodCompleted)
|
||||
Expect(err).To(BeNil())
|
||||
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
|
||||
endPod := event.Object.(*v1.Pod)
|
||||
@ -99,7 +111,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should invoke init containers on a RestartAlways pod", func() {
|
||||
/*
|
||||
Release: v1.12
|
||||
Testname: init-container-starts-app-restartalways-pod
|
||||
Description: Ensure that all InitContainers are started
|
||||
and all containers in pod started
|
||||
and at least one container is still running or is in the process of being restarted
|
||||
when Pod has restart policy as RestartAlways.
|
||||
*/
|
||||
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -115,12 +135,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
@ -131,7 +151,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -143,7 +163,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodRunning)
|
||||
Expect(err).To(BeNil())
|
||||
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
|
||||
endPod := event.Object.(*v1.Pod)
|
||||
@ -160,7 +182,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
}
|
||||
})
|
||||
|
||||
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
|
||||
/*
|
||||
Release: v1.12
|
||||
Testname: init-container-fails-stops-app-restartalways-pod
|
||||
Description: Ensure that app container is not started
|
||||
when all InitContainers failed to start
|
||||
and the Pod has restarted a few times
|
||||
and pod has restart policy as RestartAlways.
|
||||
*/
|
||||
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -177,12 +207,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
@ -193,7 +223,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -206,8 +236,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(
|
||||
framework.PodStartTimeout, wr,
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
event, err := watchtools.UntilWithoutRetry(
|
||||
ctx, wr,
|
||||
// check for the first container to fail at least once
|
||||
func(evt watch.Event) (bool, error) {
|
||||
switch t := evt.Object.(type) {
|
||||
@ -268,7 +300,13 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
|
||||
})
|
||||
|
||||
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
|
||||
/*
|
||||
Release: v1.12
|
||||
Testname: init-container-fails-stops-app-restartnever-pod
|
||||
Description: Ensure that app container is not started
|
||||
when at least one InitContainer fails to start and Pod has restart policy as RestartNever.
|
||||
*/
|
||||
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
@ -285,24 +323,24 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/true"},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -316,8 +354,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(
|
||||
framework.PodStartTimeout, wr,
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
|
||||
defer cancel()
|
||||
event, err := watchtools.UntilWithoutRetry(
|
||||
ctx, wr,
|
||||
// check for the second container to fail at least once
|
||||
func(evt watch.Event) (bool, error) {
|
||||
switch t := evt.Object.(type) {
|
||||
|
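Aside (illustrative, not part of the vendored diff): the init_container.go hunks above replace watch.Until(timeout, ...) with a context-bounded watchtools.UntilWithoutRetry. A minimal sketch of that migration, using a fake in-memory watcher so it runs without a cluster (the function name and condition are illustrative; the real tests use conditions such as conditions.PodCompleted):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForStop shows the pattern the hunks above migrate to: bound the wait
// with a context instead of passing a timeout to watch.Until, then call
// watchtools.UntilWithoutRetry with a condition function.
func waitForStop(w watch.Interface, timeout time.Duration) (*watch.Event, error) {
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	return watchtools.UntilWithoutRetry(ctx, w, func(evt watch.Event) (bool, error) {
		// Stop as soon as any Deleted event is observed.
		return evt.Type == watch.Deleted, nil
	})
}

func main() {
	fw := watch.NewFake() // in-memory watcher so the sketch runs without a cluster
	go func() {
		fw.Add(nil)
		fw.Delete(nil)
	}()
	evt, err := waitForStop(fw, 5*time.Second)
	fmt.Println(evt.Type, err)
}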
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -39,7 +39,13 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
})
|
||||
Context("when scheduling a busybox command in a pod", func() {
|
||||
podName := "busybox-scheduling-" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should print the output to logs [NodeConformance]", func() {
|
||||
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Kubelet, log output, default
|
||||
Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
|
||||
*/
|
||||
framework.ConformanceIt("should print the output to logs [NodeConformance]", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -49,7 +55,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Image: framework.BusyBoxImage,
|
||||
Name: podName,
|
||||
Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
|
||||
},
|
||||
@ -83,7 +89,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Image: framework.BusyBoxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
@ -92,7 +98,12 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
})
|
||||
})
|
||||
|
||||
It("should have an error terminated reason [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Kubelet, failed pod, terminated reason
|
||||
Description: Create a Pod that enters a terminated state. The Pod MUST have only one container. The container MUST be in a terminated state and MUST have a terminated reason.
|
||||
*/
|
||||
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() {
|
||||
Eventually(func() error {
|
||||
podData, err := podClient.Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -105,14 +116,19 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
if contTerminatedState == nil {
|
||||
return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status)
|
||||
}
|
||||
if contTerminatedState.Reason != "Error" {
|
||||
return fmt.Errorf("expected terminated state reason to be error. Got %+v", contTerminatedState)
|
||||
if contTerminatedState.ExitCode == 0 || contTerminatedState.Reason == "" {
|
||||
return fmt.Errorf("expected non-zero exitCode and non-empty terminated state reason. Got exitCode: %+v and terminated state reason: %+v", contTerminatedState.ExitCode, contTerminatedState.Reason)
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, time.Second*4).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should be possible to delete [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Kubelet, failed pod, delete
|
||||
Description: Create a Pod that has entered a terminated state. This terminated Pod MUST be able to be deleted.
|
||||
*/
|
||||
framework.ConformanceIt("should be possible to delete [NodeConformance]", func() {
|
||||
err := podClient.Delete(podName, &metav1.DeleteOptions{})
|
||||
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
|
||||
})
|
||||
@ -120,7 +136,12 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
Context("when scheduling a busybox Pod with hostAliases", func() {
|
||||
podName := "busybox-host-aliases" + string(uuid.NewUUID())
|
||||
|
||||
It("it should write entries to /etc/hosts [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Kubelet, hostAliases
|
||||
Description: Create a Pod with hostAliases and a container whose command prints the /etc/hosts entries. The Pod's logs MUST contain entries for the specified hostAliases, matching the /etc/hosts output.
|
||||
*/
|
||||
framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func() {
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
@ -130,7 +151,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Image: framework.BusyBoxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/sh", "-c", "cat /etc/hosts; sleep 6000"},
|
||||
},
|
||||
@ -154,7 +175,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
buf.ReadFrom(rc)
|
||||
hostsFileContent := buf.String()
|
||||
|
||||
if !strings.Contains(hostsFileContent, "123.45.67.89\tfoo") || !strings.Contains(hostsFileContent, "123.45.67.89\tbar") {
|
||||
if !strings.Contains(hostsFileContent, "123.45.67.89\tfoo\tbar") {
|
||||
return fmt.Errorf("expected hosts file to contain entries from HostAliases. Got:\n%+v", hostsFileContent)
|
||||
}
|
||||
|
||||
@ -164,7 +185,13 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
})
|
||||
Context("when scheduling a read only busybox container", func() {
|
||||
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
|
||||
framework.ConformanceIt("it should not write to root filesystem [NodeConformance]", func() {
|
||||
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Kubelet, pod with read only root file system
|
||||
Description: Create a Pod with a security context that sets ReadOnlyRootFilesystem to true. The Pod then tries to write to /file on the root filesystem; the write operation MUST fail as expected.
|
||||
*/
|
||||
framework.ConformanceIt("should not write to root filesystem [NodeConformance]", func() {
|
||||
isReadOnly := true
|
||||
podClient.CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -175,7 +202,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: busyboxImage,
|
||||
Image: framework.BusyBoxImage,
|
||||
Name: podName,
|
||||
Command: []string{"/bin/sh", "-c", "echo test > /file; sleep 240"},
|
||||
SecurityContext: &v1.SecurityContext{
|
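Aside (illustrative, not part of the vendored diff): the kubelet.go hunks above tighten the hostAliases assertion to expect a single combined "123.45.67.89\tfoo\tbar" line. A minimal sketch of the kind of Pod that produces it, assuming the core/v1 and meta/v1 packages used throughout and a placeholder busybox image:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hostAliasesPod builds the kind of Pod the hostAliases case above creates:
// both hostnames share one IP, so the kubelet writes one combined
// "123.45.67.89\tfoo\tbar" entry into /etc/hosts, which is why the updated
// assertion checks for the single combined line.
func hostAliasesPod(name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			HostAliases: []v1.HostAlias{
				{IP: "123.45.67.89", Hostnames: []string{"foo", "bar"}},
			},
			Containers: []v1.Container{
				{
					Name:    name,
					Image:   "busybox", // placeholder image
					Command: []string{"/bin/sh", "-c", "cat /etc/hosts; sleep 6000"},
				},
			},
		},
	}
}

func main() {
	p := hostAliasesPod("busybox-host-aliases")
	fmt.Println(p.Spec.HostAliases[0].IP, p.Spec.HostAliases[0].Hostnames)
}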
13
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go
generated
vendored
13
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go
generated
vendored
@ -20,10 +20,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
@ -51,9 +51,12 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
|
||||
}
|
||||
|
||||
/*
|
||||
Testname: kubelet-managed-etc-hosts
|
||||
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
|
||||
it into the container.
|
||||
Release : v1.9
|
||||
Testname: Kubelet, managed etc hosts
|
||||
Description: Create a Pod with hostNetwork set to false, where one of the containers mounts the /etc/hosts file from the host. Create a second Pod with hostNetwork set to true.
|
||||
1. The Pod with hostNetwork=false MUST have /etc/hosts of containers managed by the Kubelet.
|
||||
2. The Pod with hostNetwork=false but the container mounts /etc/hosts file from the host. The /etc/hosts file MUST not be managed by the Kubelet.
|
||||
3. The Pod with hostNetwork=true , /etc/hosts file MUST not be managed by the Kubelet.
|
||||
*/
|
||||
framework.ConformanceIt("should test kubelet managed /etc/hosts file [NodeConformance]", func() {
|
||||
By("Setting up the test")
|
||||
@ -123,7 +126,7 @@ func assertManagedStatus(
|
||||
}
|
||||
}
|
||||
|
||||
glog.Warningf(
|
||||
klog.Warningf(
|
||||
"For pod: %s, name: %s, expected %t, (/etc/hosts was %q), (/etc/hosts-original was %q), retryCount: %d",
|
||||
podName, name, expectedIsManaged, etcHostsContent, etcHostsOriginalContent, retryCount)
|
||||
|
||||
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e_node
|
||||
package common
|
||||
|
||||
import (
|
||||
"time"
|
||||
@ -84,6 +84,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
|
||||
}
|
||||
}
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Pod Lifecycle, post start exec hook
|
||||
Description: When a post start handler is specified in the container lifecycle using a ‘Exec’ action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod using ExecAction to validate that the post start is executed.
|
||||
*/
|
||||
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
@ -95,6 +100,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Pod Lifecycle, prestop exec hook
|
||||
Description: When a pre-stop handler is specified in the container lifecycle using a ‘Exec’ action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod using ExecAction to validate that the pre-stop is executed.
|
||||
*/
|
||||
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
@ -106,6 +116,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Pod Lifecycle, post start http hook
|
||||
Description: When a post start handler is specified in the container lifecycle using a HttpGet action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod to validate that the post start is executed.
|
||||
*/
|
||||
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PostStart: &v1.Handler{
|
||||
@ -119,6 +134,11 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
|
||||
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
|
||||
testPodWithHook(podWithHook)
|
||||
})
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Pod Lifecycle, prestop http hook
|
||||
Description: When a pre-stop handler is specified in the container lifecycle using a ‘HttpGet’ action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed.
|
||||
*/
|
||||
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
28
vendor/k8s.io/kubernetes/test/e2e/common/networking.go
generated
vendored
28
vendor/k8s.io/kubernetes/test/e2e/common/networking.go
generated
vendored
@ -31,9 +31,10 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
// expect exactly one unique hostname. Each of these endpoints reports
|
||||
// its own hostname.
|
||||
/*
|
||||
Testname: networking-intra-pod-http
|
||||
Description: Try to hit test endpoints from a test container and make
|
||||
sure each of them can report a unique hostname.
|
||||
Release : v1.9
|
||||
Testname: Networking, intra pod http
|
||||
Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes.
|
||||
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
|
||||
*/
|
||||
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
@ -43,9 +44,10 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: networking-intra-pod-udp
|
||||
Description: Try to hit test endpoints from a test container using udp
|
||||
and make sure each of them can report a unique hostname.
|
||||
Release : v1.9
|
||||
Testname: Networking, intra pod udp
|
||||
Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes.
|
||||
The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
|
||||
*/
|
||||
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
@ -55,9 +57,10 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: networking-node-pod-http
|
||||
Description: Try to hit test endpoints from the pod and make sure each
|
||||
of them can report a unique hostname.
|
||||
Release : v1.9
|
||||
Testname: Networking, intra pod http, from node
|
||||
Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes.
|
||||
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=tcp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
|
||||
*/
|
||||
framework.ConformanceIt("should function for node-pod communication: http [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
@ -67,9 +70,10 @@ var _ = Describe("[sig-network] Networking", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: networking-node-pod-udp
|
||||
Description: Try to hit test endpoints from the pod using udp and make sure
|
||||
each of them can report a unique hostname.
|
||||
Release : v1.9
|
||||
Testname: Networking, intra pod http, from node
|
||||
Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes.
|
||||
The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=udp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames.
|
||||
*/
|
||||
framework.ConformanceIt("should function for node-pod communication: udp [NodeConformance]", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
|
166
vendor/k8s.io/kubernetes/test/e2e/common/node_lease.go
generated
vendored
Normal file
166
vendor/k8s.io/kubernetes/test/e2e/common/node_lease.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
coordv1beta1 "k8s.io/api/coordination/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
v1node "k8s.io/kubernetes/pkg/api/v1/node"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("[Feature:NodeLease][NodeAlphaFeature:NodeLease]", func() {
|
||||
var nodeName string
|
||||
f := framework.NewDefaultFramework("node-lease-test")
|
||||
|
||||
BeforeEach(func() {
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodes.Items)).NotTo(BeZero())
|
||||
nodeName = nodes.Items[0].ObjectMeta.Name
|
||||
})
|
||||
|
||||
Context("when the NodeLease feature is enabled", func() {
|
||||
It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
|
||||
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
|
||||
var (
|
||||
err error
|
||||
lease *coordv1beta1.Lease
|
||||
)
|
||||
By("check that lease for this Kubelet exists in the kube-node-lease namespace")
|
||||
Eventually(func() error {
|
||||
lease, err = leaseClient.Get(nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 5*time.Minute, 5*time.Second).Should(BeNil())
|
||||
// check basic expectations for the lease
|
||||
Expect(expectLease(lease, nodeName)).To(BeNil())
|
||||
|
||||
By("check that node lease is updated at least once within the lease duration")
|
||||
Eventually(func() error {
|
||||
newLease, err := leaseClient.Get(nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// check basic expectations for the latest lease
|
||||
if err := expectLease(newLease, nodeName); err != nil {
|
||||
return err
|
||||
}
|
||||
// check that RenewTime has been updated on the latest lease
|
||||
newTime := (*newLease.Spec.RenewTime).Time
|
||||
oldTime := (*lease.Spec.RenewTime).Time
|
||||
if !newTime.After(oldTime) {
|
||||
return fmt.Errorf("new lease has time %v, which is not after old lease time %v", newTime, oldTime)
|
||||
}
|
||||
return nil
|
||||
}, time.Duration(*lease.Spec.LeaseDurationSeconds)*time.Second,
|
||||
time.Duration(*lease.Spec.LeaseDurationSeconds/4)*time.Second)
|
||||
})
|
||||
|
||||
It("the kubelet should report node status infrequently", func() {
|
||||
By("wait until node is ready")
|
||||
framework.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
|
||||
|
||||
By("wait until there is node lease")
|
||||
var err error
|
||||
var lease *coordv1beta1.Lease
|
||||
Eventually(func() error {
|
||||
lease, err = f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, 5*time.Minute, 5*time.Second).Should(BeNil())
|
||||
// check basic expectations for the lease
|
||||
Expect(expectLease(lease, nodeName)).To(BeNil())
|
||||
leaseDuration := time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second
|
||||
|
||||
By("verify NodeStatus report period is longer than lease duration")
|
||||
// NodeStatus is reported from node to master when there is some change or
|
||||
// enough time has passed. So for here, keep checking the time diff
|
||||
// between 2 NodeStatus report, until it is longer than lease duration (
|
||||
// the same as nodeMonitorGracePeriod).
|
||||
heartbeatTime := getNextReadyConditionHeartbeatTime(f.ClientSet, nodeName, metav1.Time{})
|
||||
Eventually(func() error {
|
||||
nextHeartbeatTime := getNextReadyConditionHeartbeatTime(f.ClientSet, nodeName, heartbeatTime)
|
||||
|
||||
if nextHeartbeatTime.Time.After(heartbeatTime.Time.Add(leaseDuration)) {
|
||||
return nil
|
||||
}
|
||||
heartbeatTime = nextHeartbeatTime
|
||||
return fmt.Errorf("node status report period is shorter than lease duration")
|
||||
|
||||
// Enter next round immediately.
|
||||
}, 5*time.Minute, time.Nanosecond).Should(BeNil())
|
||||
|
||||
By("verify node is still in ready status even though node status report is infrequent")
|
||||
// This check on node status is only meaningful when this e2e test is
|
||||
// running as cluster e2e test, because node e2e test does not create and
|
||||
// run controller manager, i.e., no node lifecycle controller.
|
||||
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).To(BeNil())
|
||||
_, readyCondition := v1node.GetNodeCondition(&node.Status, corev1.NodeReady)
|
||||
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func getNextReadyConditionHeartbeatTime(clientSet clientset.Interface, nodeName string, prevHeartbeatTime metav1.Time) metav1.Time {
|
||||
var newHeartbeatTime metav1.Time
|
||||
Eventually(func() error {
|
||||
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, readyCondition := v1node.GetNodeCondition(&node.Status, corev1.NodeReady)
|
||||
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
|
||||
newHeartbeatTime = readyCondition.LastHeartbeatTime
|
||||
if prevHeartbeatTime.Before(&newHeartbeatTime) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("heartbeat has not changed yet")
|
||||
}, 5*time.Minute, 5*time.Second).Should(BeNil())
|
||||
return newHeartbeatTime
|
||||
}
|
||||
|
||||
func expectLease(lease *coordv1beta1.Lease, nodeName string) error {
|
||||
// expect values for HolderIdentity, LeaseDurationSeconds, and RenewTime
|
||||
if lease.Spec.HolderIdentity == nil {
|
||||
return fmt.Errorf("Spec.HolderIdentity should not be nil")
|
||||
}
|
||||
if lease.Spec.LeaseDurationSeconds == nil {
|
||||
return fmt.Errorf("Spec.LeaseDurationSeconds should not be nil")
|
||||
}
|
||||
if lease.Spec.RenewTime == nil {
|
||||
return fmt.Errorf("Spec.RenewTime should not be nil")
|
||||
}
|
||||
// ensure that the HolderIdentity matches the node name
|
||||
if *lease.Spec.HolderIdentity != nodeName {
|
||||
return fmt.Errorf("Spec.HolderIdentity (%v) should match the node name (%v)", *lease.Spec.HolderIdentity, nodeName)
|
||||
}
|
||||
return nil
|
||||
}
|
142
vendor/k8s.io/kubernetes/test/e2e/common/pods.go
generated
vendored
142
vendor/k8s.io/kubernetes/test/e2e/common/pods.go
generated
vendored
@ -29,6 +29,7 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
@ -36,16 +37,20 @@ import (
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/kubelet"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var (
|
||||
buildBackOffDuration = time.Minute
|
||||
syncLoopFrequency = 10 * time.Second
|
||||
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
|
||||
// maxReadyStatusUpdateTolerance specifies the latency that allows kubelet to update pod status.
|
||||
// When kubelet is under heavy load (tests may be parallelized), the delay may be longer, hence
|
||||
// causing tests to be flaky.
|
||||
maxReadyStatusUpdateTolerance = 10 * time.Second
|
||||
)
|
||||
|
||||
// testHostIP tests that a pod gets a host IP
|
||||
@ -129,9 +134,9 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-created-pod-assigned-hostip
|
||||
Description: Make sure when a pod is created that it is assigned a host IP
|
||||
Address.
|
||||
Release : v1.9
|
||||
Testname: Pods, assigned hostip
|
||||
Description: Create a Pod. Pod status MUST return successfully and contains a valid IP address.
|
||||
*/
|
||||
framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
|
||||
name := "pod-hostip-" + string(uuid.NewUUID())
|
||||
@ -151,9 +156,9 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-submitted-removed
|
||||
Description: Makes sure a pod is created, a watch can be setup for the pod,
|
||||
pod creation was observed, pod is deleted, and pod deletion is observed.
|
||||
Release : v1.9
|
||||
Testname: Pods, lifecycle
|
||||
Description: A Pod is created with a unique label. Pod MUST be accessible when queried using the label selector upon creation. Add a watch, check if the Pod is running. Pod then deleted, The pod deletion timestamp is observed. The watch MUST return the pod deleted event. Query with the original selector for the Pod MUST return empty list.
|
||||
*/
|
||||
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
|
||||
By("creating the pod")
|
||||
@ -171,7 +176,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Image: imageutils.GetE2EImage(imageutils.Nginx),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -277,8 +282,9 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-updated-successfully
|
||||
Description: Make sure it is possible to successfully update a pod's labels.
|
||||
Release : v1.9
|
||||
Testname: Pods, update
|
||||
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful.
|
||||
*/
|
||||
framework.ConformanceIt("should be updated [NodeConformance]", func() {
|
||||
By("creating the pod")
|
||||
@ -296,7 +302,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Image: imageutils.GetE2EImage(imageutils.Nginx),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -330,10 +336,9 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-update-active-deadline-seconds
|
||||
Description: Make sure it is possible to create a pod, update its
|
||||
activeDeadlineSecondsValue, and then waits for the deadline to pass
|
||||
and verifies the pod is terminated.
|
||||
Release : v1.9
|
||||
Testname: Pods, ActiveDeadlineSeconds
|
||||
Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate of the specified time elapses.
|
||||
*/
|
||||
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
|
||||
By("creating the pod")
|
||||
@ -351,7 +356,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Image: imageutils.GetE2EImage(imageutils.Nginx),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -377,9 +382,9 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-contain-services-environment-variables
|
||||
Description: Make sure that when a pod is created it contains environment
|
||||
variables for each active service.
|
||||
Release : v1.9
|
||||
Testname: Pods, service environment variables
|
||||
Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. If a new Pod is created in the cluster then the Pod MUST have the fooservice environment variables available from this new Pod. The new create Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values.
|
||||
*/
|
||||
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() {
|
||||
// Make a pod that will be a service.
|
||||
@ -442,7 +447,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
},
|
||||
},
|
||||
@ -467,7 +472,13 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
}, maxRetries, "Container should have service environment variables set")
|
||||
})
|
||||
|
||||
It("should support remote command execution over websockets [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Pods, remote command execution over websocket
|
||||
Description: A Pod is created. Websocket is created to retrieve exec command output from this pod.
|
||||
Message retrieved form Websocket MUST match with expected exec command output.
|
||||
*/
|
||||
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
|
||||
|
||||
@ -481,7 +492,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
|
||||
},
|
||||
},
|
||||
@ -499,8 +510,8 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Param("stderr", "1").
|
||||
Param("stdout", "1").
|
||||
Param("container", pod.Spec.Containers[0].Name).
|
||||
Param("command", "cat").
|
||||
Param("command", "/etc/resolv.conf")
|
||||
Param("command", "echo").
|
||||
Param("command", "remote execution test")
|
||||
|
||||
url := req.URL()
|
||||
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
|
||||
@ -536,14 +547,20 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
if buf.Len() == 0 {
|
||||
return fmt.Errorf("Unexpected output from server")
|
||||
}
|
||||
if !strings.Contains(buf.String(), "nameserver") {
|
||||
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
|
||||
if !strings.Contains(buf.String(), "remote execution test") {
|
||||
return fmt.Errorf("Expected to find 'remote execution test' in %q", buf.String())
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, 10*time.Second).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should support retrieving logs from the container over websockets [NodeConformance]", func() {
|
||||
/*
|
||||
Release : v1.13
|
||||
Testname: Pods, logs from websockets
|
||||
Description: A Pod is created. Websocket is created to retrieve log of a container from this pod.
|
||||
Message retrieved form Websocket MUST match with container's output.
|
||||
*/
|
||||
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
|
||||
|
||||
@ -557,7 +574,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
|
||||
},
|
||||
},
|
||||
@ -600,6 +617,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
}
|
||||
})
|
||||
|
||||
// Slow (~7 mins)
|
||||
It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
|
||||
podName := "pod-back-off-image"
|
||||
containerName := "back-off"
|
||||
@ -612,7 +630,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
|
||||
},
|
||||
},
|
||||
@ -623,7 +641,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
|
||||
By("updating the image")
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
|
||||
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
|
||||
})
|
||||
|
||||
time.Sleep(syncLoopFrequency)
|
||||
@ -640,7 +658,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
}
|
||||
})
|
||||
|
||||
// Slow issue #19027 (20 mins)
|
||||
// Slow by design (~27 mins) issue #19027
|
||||
It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
|
||||
podName := "back-off-cap"
|
||||
containerName := "back-off-cap"
|
||||
@ -653,7 +671,7 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
|
||||
},
|
||||
},
|
||||
@ -694,4 +712,64 @@ var _ = framework.KubeDescribe("Pods", func() {
|
||||
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
|
||||
}
|
||||
})
|
||||
|
||||
// TODO(freehan): label the test to be [NodeConformance] after tests are proven to be stable.
|
||||
It("should support pod readiness gates [NodeFeature:PodReadinessGate]", func() {
|
||||
podName := "pod-ready"
|
||||
readinessGate1 := "k8s.io/test-condition1"
|
||||
readinessGate2 := "k8s.io/test-condition2"
|
||||
patchStatusFmt := `{"status":{"conditions":[{"type":%q, "status":%q}]}}`
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"test": "pod-readiness-gate"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pod-readiness-gate",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
|
||||
},
|
||||
},
|
||||
ReadinessGates: []v1.PodReadinessGate{
|
||||
{ConditionType: v1.PodConditionType(readinessGate1)},
|
||||
{ConditionType: v1.PodConditionType(readinessGate2)},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
validatePodReadiness := func(expectReady bool) {
|
||||
Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
|
||||
podReady := podClient.PodIsReady(podName)
|
||||
res := expectReady == podReady
|
||||
if !res {
|
||||
framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
|
||||
}
|
||||
return res, nil
|
||||
})).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient.CreateSync(pod)
|
||||
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false initially.")
|
||||
|
||||
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
|
||||
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Sleep for 10 seconds.
|
||||
time.Sleep(maxReadyStatusUpdateTolerance)
|
||||
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
|
||||
|
||||
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
|
||||
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
validatePodReadiness(true)
|
||||
|
||||
By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
|
||||
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
validatePodReadiness(false)
|
||||
|
||||
})
|
||||
})
|
||||
|
5
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go
generated
vendored
5
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go
generated
vendored
@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
type PrivilegedPodTestConfig struct {
|
||||
@ -90,14 +91,14 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: c.privilegedContainer,
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
|
||||
Command: []string{"/bin/sleep", "10000"},
|
||||
},
|
||||
{
|
||||
Name: c.notPrivilegedContainer,
|
||||
Image: busyboxImage,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
SecurityContext: &v1.SecurityContext{Privileged: ¬Privileged},
|
||||
Command: []string{"/bin/sleep", "10000"},
|
||||
|
1675
vendor/k8s.io/kubernetes/test/e2e/common/projected.go
generated
vendored
1675
vendor/k8s.io/kubernetes/test/e2e/common/projected.go
generated
vendored
File diff suppressed because it is too large
Load Diff
147
vendor/k8s.io/kubernetes/test/e2e/common/projected_combined.go
generated
vendored
Normal file
147
vendor/k8s.io/kubernetes/test/e2e/common/projected_combined.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-storage] Projected combined", func() {
|
||||
f := framework.NewDefaultFramework("projected")
|
||||
|
||||
// Test multiple projections
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, multiple projections
|
||||
Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
|
||||
var err error
|
||||
podName := "projected-volume-" + string(uuid.NewUUID())
|
||||
secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID())
|
||||
configMapName := "configmap-projected-all-test-volume-" + string(uuid.NewUUID())
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: configMapName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"configmap-data": "configmap-value-1",
|
||||
},
|
||||
}
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: secretName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"secret-data": []byte("secret-value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil)
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "projected-all-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "cat /all/podname && cat /all/secret-data && cat /all/configmap-data"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/all",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
f.TestContainerOutput("Check all projections for projected volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s", podName),
|
||||
"secret-value-1",
|
||||
"configmap-value-1",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func projectedAllVolumeBasePod(podName string, secretName string, configMapName string, labels, annotations map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "podinfo",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
DownwardAPI: &v1.DownwardAPIProjection{
|
||||
Items: []v1.DownwardAPIVolumeFile{
|
||||
{
|
||||
Path: "podname",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: secretName,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: configMapName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
683
vendor/k8s.io/kubernetes/test/e2e/common/projected_configmap.go
generated
vendored
Normal file
683
vendor/k8s.io/kubernetes/test/e2e/common/projected_configmap.go
generated
vendored
Normal file
@ -0,0 +1,683 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-storage] Projected configMap", func() {
|
||||
f := framework.NewDefaultFramework("projected")
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, volume mode default
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r—-r—-.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, volume mode 0400
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r——-——-—-.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, non-root user
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw—r——r—-.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, mapped
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw—r——r—-.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, mapped, volume mode 0400
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r-—r——r—-.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doProjectedConfigMapE2EWithMappings(f, 0, 0, &mode)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, mapped, non-root user
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r-—r——r—-.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, update
|
||||
Description: A Pod is created with projected volume source ‘ConfigMap’ to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the confgiMap to value-2.
|
||||
*/
|
||||
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
name := "projected-configmap-test-upd-" + string(uuid.NewUUID())
|
||||
volumeName := "projected-configmap-volume"
|
||||
volumeMountPath := "/etc/projected-configmap-volume"
|
||||
containerName := "projected-configmap-volume-test"
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
}
|
||||
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
|
||||
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
|
||||
configMap.ResourceVersion = "" // to force update
|
||||
configMap.Data["data-1"] = "value-2"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release : v1.9
|
||||
Testname: Projected Volume, ConfigMap, create, update and delete
|
||||
Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as ‘value-1’. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
volumeMountPath := "/etc/projected-configmap-volumes"
|
||||
|
||||
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
|
||||
deleteContainerName := "delcm-volume-test"
|
||||
deleteVolumeName := "deletecm-volume"
|
||||
deleteConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: deleteName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
|
||||
updateContainerName := "updcm-volume-test"
|
||||
updateVolumeName := "updatecm-volume"
|
||||
updateConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: updateName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
|
||||
createContainerName := "createcm-volume-test"
|
||||
createVolumeName := "createcm-volume"
|
||||
createConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: createName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
|
||||
var err error
|
||||
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
|
||||
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: deleteName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: updateName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: createName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: deleteContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "delete"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "update"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: createVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "create"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
|
||||
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
|
||||
		err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)

		By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
		updateConfigMap.ResourceVersion = "" // to force update
		delete(updateConfigMap.Data, "data-1")
		updateConfigMap.Data["data-3"] = "value-3"
		_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
		Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)

		By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
		if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
			framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
		}

		By("waiting to observe update in volume")

		Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
		Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
		Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
	})

	/*
		Release : v1.9
		Testname: Projected Volume, ConfigMap, multiple volume paths
		Description: A Pod is created with a projected volume source ‘ConfigMap’ to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts.
	*/
	framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
		var (
			name             = "projected-configmap-test-volume-" + string(uuid.NewUUID())
			volumeName       = "projected-configmap-volume"
			volumeMountPath  = "/etc/projected-configmap-volume"
			volumeName2      = "projected-configmap-volume-2"
			volumeMountPath2 = "/etc/projected-configmap-volume-2"
			configMap        = newConfigMap(f, name)
		)

		By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
		var err error
		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: volumeName,
						VolumeSource: v1.VolumeSource{
							Projected: &v1.ProjectedVolumeSource{
								Sources: []v1.VolumeProjection{
									{
										ConfigMap: &v1.ConfigMapProjection{
											LocalObjectReference: v1.LocalObjectReference{
												Name: name,
											},
										},
									},
								},
							},
						},
					},
					{
						Name: volumeName2,
						VolumeSource: v1.VolumeSource{
							Projected: &v1.ProjectedVolumeSource{
								Sources: []v1.VolumeProjection{
									{
										ConfigMap: &v1.ConfigMapProjection{
											LocalObjectReference: v1.LocalObjectReference{
												Name: name,
											},
										},
									},
								},
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  "projected-configmap-volume-test",
						Image: imageutils.GetE2EImage(imageutils.Mounttest),
						Args:  []string{"--file_content=/etc/projected-configmap-volume/data-1"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      volumeName,
								MountPath: volumeMountPath,
								ReadOnly:  true,
							},
							{
								Name:      volumeName2,
								MountPath: volumeMountPath2,
								ReadOnly:  true,
							},
						},
					},
				},
				RestartPolicy: v1.RestartPolicyNever,
			},
		}

		f.TestContainerOutput("consume configMaps", pod, 0, []string{
			"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
		})

	})

	//The pod stays Pending during volume creation until the referenced configMap object becomes available,
	//or until mounting the configMap volume times out. No configMap object is defined for this pod, so the
	//mount must time out unless the volume is marked optional (see the optional-flag sketch below).
	//Slow (~5 mins)
	It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
		volumeMountPath := "/etc/projected-configmap-volumes"
		podName := "pod-projected-configmaps-" + string(uuid.NewUUID())
		err := createNonOptionalConfigMapPod(f, volumeMountPath, podName)
		Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
	})

	//A configMap object is defined for the pod, but it references a key that is not present in the
	//configMap. During pod creation, the volume setup must fail unless the key is marked optional.
	//Slow (~5 mins)
	It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
		volumeMountPath := "/etc/configmap-volumes"
		podName := "pod-configmaps-" + string(uuid.NewUUID())
		err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath, podName)
		Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
	})
})
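
// optionalConfigMapProjection is a hypothetical sketch (illustration only, not
// called by the tests above). It shows the `optional` flag that the two
// non-optional failure tests rely on being left unset: when Optional is true,
// the kubelet mounts the projected volume even if the referenced configMap (or
// key) is missing, so the pod would not stay Pending.
func optionalConfigMapProjection(name string) v1.VolumeProjection {
	optional := true
	return v1.VolumeProjection{
		ConfigMap: &v1.ConfigMapProjection{
			LocalObjectReference: v1.LocalObjectReference{Name: name},
			Optional:             &optional,
		},
	}
}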

func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
	userID := int64(uid)
	groupID := int64(fsGroup)

	var (
		name            = "projected-configmap-test-volume-" + string(uuid.NewUUID())
		volumeName      = "projected-configmap-volume"
		volumeMountPath = "/etc/projected-configmap-volume"
		configMap       = newConfigMap(f, name)
	)

	By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
	var err error
	if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
		framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			SecurityContext: &v1.PodSecurityContext{},
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						Projected: &v1.ProjectedVolumeSource{
							Sources: []v1.VolumeProjection{
								{
									ConfigMap: &v1.ConfigMapProjection{
										LocalObjectReference: v1.LocalObjectReference{
											Name: name,
										},
									},
								},
							},
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  "projected-configmap-volume-test",
					Image: imageutils.GetE2EImage(imageutils.Mounttest),
					Args: []string{
						"--file_content=/etc/projected-configmap-volume/data-1",
						"--file_mode=/etc/projected-configmap-volume/data-1"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumeMountPath,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	if userID != 0 {
		pod.Spec.SecurityContext.RunAsUser = &userID
	}

	if groupID != 0 {
		pod.Spec.SecurityContext.FSGroup = &groupID
	}

	if defaultMode != nil {
		//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.DefaultMode = defaultMode
		pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
	} else {
		mode := int32(0644)
		defaultMode = &mode
	}

	modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
	output := []string{
		"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
		"mode of file \"/etc/projected-configmap-volume/data-1\": " + modeString,
	}
	f.TestContainerOutput("consume configMaps", pod, 0, output)
}

func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
	userID := int64(uid)
	groupID := int64(fsGroup)

	var (
		name            = "projected-configmap-test-volume-map-" + string(uuid.NewUUID())
		volumeName      = "projected-configmap-volume"
		volumeMountPath = "/etc/projected-configmap-volume"
		configMap       = newConfigMap(f, name)
	)

	By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))

	var err error
	if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
		framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			SecurityContext: &v1.PodSecurityContext{},
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						Projected: &v1.ProjectedVolumeSource{
							Sources: []v1.VolumeProjection{
								{
									ConfigMap: &v1.ConfigMapProjection{
										LocalObjectReference: v1.LocalObjectReference{
											Name: name,
										},
										Items: []v1.KeyToPath{
											{
												Key:  "data-2",
												Path: "path/to/data-2",
											},
										},
									},
								},
							},
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  "projected-configmap-volume-test",
					Image: imageutils.GetE2EImage(imageutils.Mounttest),
					Args: []string{"--file_content=/etc/projected-configmap-volume/path/to/data-2",
						"--file_mode=/etc/projected-configmap-volume/path/to/data-2"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumeMountPath,
							ReadOnly:  true,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	if userID != 0 {
		pod.Spec.SecurityContext.RunAsUser = &userID
	}

	if groupID != 0 {
		pod.Spec.SecurityContext.FSGroup = &groupID
	}

	if itemMode != nil {
		//pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
		pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = itemMode
	} else {
		mode := int32(0644)
		itemMode = &mode
	}

	// Just check file mode if fsGroup is not set. If fsGroup is set, the
	// final mode is adjusted and we are not testing that case.
	output := []string{
		"content of file \"/etc/projected-configmap-volume/path/to/data-2\": value-2",
	}
	if fsGroup == 0 {
		modeString := fmt.Sprintf("%v", os.FileMode(*itemMode))
		output = append(output, "mode of file \"/etc/projected-configmap-volume/path/to/data-2\": "+modeString)
	}
	f.TestContainerOutput("consume configMaps", pod, 0, output)
}
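
// defaultModeString is a hypothetical helper (illustration only, not called by
// the tests above). It shows how the "mode of file" expectation built in the
// two helpers above is rendered: for the projected-volume default mode 0644,
// fmt.Sprintf("%v", os.FileMode(0644)) yields "-rw-r--r--".
func defaultModeString() string {
	return fmt.Sprintf("%v", os.FileMode(0644))
}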