vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions


@@ -37,15 +37,15 @@ KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="gcr.io/google-containers/perl"
IMAGE_PAUSE_V2="gcr.io/google-containers/pause:2.0"
IMAGE_DAEMONSET_R2="gcr.io/google-containers/pause:latest"
IMAGE_DAEMONSET_R2_2="gcr.io/google-containers/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="gcr.io/google_containers/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="gcr.io/google_containers/nginx-slim:0.8"
IMAGE_PERL="k8s.gcr.io/perl"
IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
@@ -106,7 +106,7 @@ function record_command() {
juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
if [[ $? -ne 0 ]]; then
echo "Error when running ${name}"
foundError="True"
foundError="${foundError}""${name}"", "
fi
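# foundError now accumulates the names of failing tests rather than a bare
# "True"; after two failures it would read e.g. "run_pod_tests, run_rs_tests, "
# (names illustrative) and is echoed by runTests before exiting non-zero.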
set -o nounset
@@ -719,9 +719,9 @@ run_pod_tests() {
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
-kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}'
-# Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0
-kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:'
+kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "k8s.gcr.io/pause-amd64:3.1"}]}}'
+# Post-condition: valid-pod POD has expected image
+kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/pause-amd64:3.1:'
## If resourceVersion is specified in the patch, it is treated as a precondition, i.e. if it differs from the resourceVersion stored on the server, the patch is rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
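# Illustrative sketch (not part of this change): a patch that embeds a stale
# resourceVersion fails the precondition and surfaces a 409, e.g.
#   kubectl patch pod valid-pod -p '{"metadata":{"resourceVersion":"1"},"spec":{"activeDeadlineSeconds":30}}'
#   # => Error from server (Conflict): Operation cannot be fulfilled on pods "valid-pod": ...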
@@ -802,13 +802,13 @@ __EOF__
kubectl delete node node-v1-test "${kube_flags[@]}"
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
echo -e "#!/bin/bash\n${SED} -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
echo -e "#!/bin/bash\n${SED} -i \"s/nginx/k8s.gcr.io\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
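# (kubectl edit launches whatever EDITOR points to, so a throwaway script that
# rewrites the buffer with sed stands in for an interactive editor)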
# Pre-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
[[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]]
-# Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
-kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
+# Post-condition: valid-pod POD has image k8s.gcr.io/serve_hostname
+kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'k8s.gcr.io/serve_hostname:'
# cleaning
rm /tmp/tmp-editor.sh
@@ -1156,14 +1156,26 @@ run_kubectl_apply_deployments_tests() {
kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
-# apply deployment with wrong labels mismatch selector throws errors
+# apply deployment with new labels and a conflicting resourceVersion
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
-kube::test::if_has_string "${output_message}" 'Invalid value'
-# apply deployment with --force and --overwrite will success
+kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
+# apply deployment with --force and --overwrite will succeed
kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
# check the changed deployment
output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
kube::test::if_has_string "${output_message}" '"name": "nginx2"'
# applying a resource (with --force) that is both conflicting and invalid will
# cause the server to only return a "Conflict" error when we attempt to patch.
# This means that we will delete the existing resource after receiving 5 conflict
# errors in a row from the server, and will attempt to create the modified
# resource that we are passing to "apply". Since the modified resource is also
# invalid, we will receive an invalid error when we attempt to create it, after
# having deleted the old resource. Ensure that when this case is reached, the
# old resource is restored once again, and the validation error is printed.
output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'Invalid value'
# Ensure that the old object has been restored
kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
# cleanup
kubectl delete deployments --all --grace-period=10
@@ -1311,6 +1323,117 @@ run_kubectl_run_tests() {
set +o errexit
}
run_kubectl_server_print_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl get --experimental-server-print"
### Test retrieval of all types in discovery
# Pre-condition: no resources exist
output_message=$(kubectl get pods --experimental-server-print 2>&1 "${kube_flags[@]}")
# Post-condition: Expect text indicating no resources were found
kube::test::if_has_string "${output_message}" 'No resources found.'
### Test retrieval of pods against server-side printing
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
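# (the awk program `NF{NF--};1` removes the last whitespace-separated field of
# every non-empty line and then prints the line, which strips the trailing AGE
# column; decrementing NF to drop fields is GNU awk behavior)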
expected_output=$(kubectl get pod "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get pod --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of daemonsets against server-side printing
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Post-condition: daemonset is created
kube::test::get_object_assert ds "{{range.items}}{{$id_field}}:{{end}}" 'bind:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get ds "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get ds --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of replicationcontrollers against server-side printing
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get rc "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get rc --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of replicasets against server-side printing
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get rs "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get rs --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of jobs against server-side printing
kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
# Post-Condition: assertion object exists
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get jobs/pi "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get jobs/pi --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of clusterroles against server-side printing
kubectl create "${kube_flags[@]}" clusterrole sample-role --verb=* --resource=pods
# Post-Condition: assertion object exists
kube::test::get_object_assert clusterrole/sample-role "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
# Compare "old" output with experimental output and ensure both are the same
# remove the last column, as it contains the object's AGE, which could cause a mismatch.
expected_output=$(kubectl get clusterroles/sample-role "${kube_flags[@]}" | awk 'NF{NF--};1')
actual_output=$(kubectl get clusterroles/sample-role --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
### Test retrieval of crds against server-side printing
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "foos.company.com"
},
"spec": {
"group": "company.com",
"version": "v1",
"names": {
"plural": "foos",
"kind": "Foo"
}
}
}
__EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'foos.company.com:'
# Test that we can list this new CustomResource
kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
# Compare "old" output with experimental output and ensure both are the same
expected_output=$(kubectl get foos "${kube_flags[@]}")
actual_output=$(kubectl get foos --experimental-server-print "${kube_flags[@]}" | awk 'NF{NF--};1')
kube::test::if_has_string "${actual_output}" "${expected_output}"
# teardown
kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
kubectl delete clusterroles/sample-role "${kube_flags_with_token[@]}"
kubectl delete jobs pi "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
kubectl delete ds bind "${kube_flags[@]}"
kubectl delete pod valid-pod "${kube_flags[@]}"
}
run_kubectl_get_tests() {
set -o nounset
set -o errexit
@@ -1397,11 +1520,15 @@ run_kubectl_get_tests() {
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
+kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/daemonsets 200 OK"
+kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/deployments 200 OK"
+kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/replicasets 200 OK"
+kube::test::if_has_string "${output_message}" "/apis/apps/v1/namespaces/default/statefulsets 200 OK"
kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
-kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
-kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
+kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/daemonsets 200 OK"
+kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
+kube::test::if_has_not_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
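# (these workload kinds are now listed through apps/v1, so the deprecated
# extensions/v1beta1 endpoints must no longer be requested by kubectl get)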
### Test kubectl get chunk size
output_message=$(kubectl --v=6 get clusterrole --chunk-size=10 2>&1 "${kube_flags[@]}")
@@ -1414,6 +1541,18 @@ run_kubectl_get_tests() {
# Post-condition: Check if we get a limit and continue
kube::test::if_has_string "${output_message}" "/clusterroles?limit=500 200 OK"
### Test kubectl get chunk size does not result in a --watch error when resource list is served in multiple chunks
# Pre-condition: no ConfigMaps exist
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: Create three configmaps and ensure that we can --watch them with a --chunk-size of 1
kubectl create cm one "${kube_flags[@]}"
kubectl create cm two "${kube_flags[@]}"
kubectl create cm three "${kube_flags[@]}"
output_message=$(kubectl get configmap --chunk-size=1 --watch --request-timeout=1s 2>&1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
output_message=$(kubectl get configmap --chunk-size=1 --watch-only --request-timeout=1s 2>&1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" "watch is only supported on individual resources"
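# (background: --chunk-size=N is implemented as paginated LIST requests, i.e.
# ?limit=N plus a server-issued continue token per chunk; the assertions above
# verify that --watch and --watch-only tolerate a multi-chunk initial list)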
### Test --allow-missing-template-keys
# Pre-condition: no POD exists
create_and_use_new_namespace
@@ -1444,7 +1583,7 @@ run_kubectl_get_tests() {
kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
-kube::test::if_has_string "${output_message}" 'pods/valid-pod' # resource name
+kube::test::if_has_string "${output_message}" 'pod/valid-pod' # resource name
output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
@@ -1558,6 +1697,33 @@ __EOF__
# Post-Condition: assertion object exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:'
# This test ensures that the name printer is able to output a resource
# in the proper "kind.group/resource_name" format, and that the
# resource builder is able to resolve a GVK when a kind.group pair is given.
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1beta1",
"metadata": {
"name": "resources.mygroup.example.com"
},
"spec": {
"group": "mygroup.example.com",
"version": "v1alpha1",
"scope": "Namespaced",
"names": {
"plural": "resources",
"singular": "resource",
"kind": "Kind",
"listKind": "KindList"
}
}
}
__EOF__
# Post-Condition: assertion crd with non-matching kind and resource exists
kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:'
run_non_native_resource_tests
# teardown
@@ -1605,6 +1771,28 @@ run_non_native_resource_tests() {
# Test that we can list this new CustomResource (bars)
kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can list this new CustomResource (resources)
kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Kind
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
# Test that -o name returns kind.group/resourcename
output_message=$(kubectl "${kube_flags[@]}" get resource/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
output_message=$(kubectl "${kube_flags[@]}" get resources/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
output_message=$(kubectl "${kube_flags[@]}" get kind.mygroup.example.com/myobj -o name)
kube::test::if_has_string "${output_message}" 'kind.mygroup.example.com/myobj'
# Delete the resource with cascade.
kubectl "${kube_flags[@]}" delete resources myobj --cascade=true
# Make sure it's gone
kube::test::get_object_assert resources "{{range.items}}{{$id_field}}:{{end}}" ''
# Test that we can create a new resource of type Foo
kubectl "${kube_flags[@]}" create -f hack/testdata/CRD/foo.yaml "${kube_flags[@]}"
@@ -1633,7 +1821,7 @@ run_non_native_resource_tests() {
kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
output_message=$(kubectl "${kube_flags[@]}" get foos/test -o name)
-kube::test::if_has_string "${output_message}" 'foos/test'
+kube::test::if_has_string "${output_message}" 'foo.company.com/test'
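# (-o name now prints the group-qualified <resource>.<group>/<name> form for
# custom resources instead of the bare <resource>/<name>)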
# Test patching
kube::log::status "Testing CustomResource patching"
@@ -1716,7 +1904,7 @@ run_non_native_resource_tests() {
# Stop the watcher and the patch loop.
kill -9 ${watch_pid}
kill -9 ${patch_pid}
-kube::test::if_has_string "${watch_output}" 'bars/test'
+kube::test::if_has_string "${watch_output}" 'bar.company.com/test'
# Delete the resource without cascade.
kubectl "${kube_flags[@]}" delete bars test --cascade=false
@@ -2309,11 +2497,15 @@ run_configmap_tests() {
kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
kube::test::get_object_assert 'configmap/test-binary-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-binary-configmap'
[[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
[[ "$(kubectl get configmap/test-binary-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'binaryData')" ]]
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete configmap test-binary-configmap --namespace=test-configmaps
kubectl delete namespace test-configmaps
set +o nounset
@@ -2746,7 +2938,7 @@ run_deployment_tests() {
create_and_use_new_namespace
kube::log::status "Testing deployments"
# Test kubectl create deployment (using default - old generator)
-kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd
+kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and old generator was used, iow. old defaults are applied
@@ -2761,7 +2953,7 @@ run_deployment_tests() {
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
-kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
+kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and new generator was used, iow. new defaults are applied
@@ -2806,7 +2998,7 @@ run_deployment_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
-kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
+kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
@@ -3064,7 +3256,7 @@ run_rs_tests() {
# Test set commands
# Pre-condition: frontend replica set exists at generation 1
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
-kubectl set image rs/frontend "${kube_flags[@]}" *=gcr.io/google-containers/pause:test-cmd
+kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
@@ -3151,7 +3343,7 @@ run_daemonset_tests() {
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Test set commands
-kubectl set image daemonsets/bind "${kube_flags[@]}" *=gcr.io/google-containers/pause:test-cmd
+kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
@@ -3453,13 +3645,13 @@ run_kubectl_config_set_tests() {
cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
-r_writen=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
+r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
encoded=$(echo -n "$cert_data" | base64)
kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
-e_writen=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
+e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
test "$e_writen" == "$r_writen"
test "$e_written" == "$r_written"
set +o nounset
set +o errexit
@@ -3471,10 +3663,8 @@ run_kubectl_local_proxy_tests() {
kube::log::status "Testing kubectl local proxy"
# Make sure the UI can be proxied
start-proxy
-check-curl-proxy-code /ui 307
-check-curl-proxy-code /api/ui 404
check-curl-proxy-code /api/kubernetes 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
@@ -3492,7 +3682,8 @@ run_kubectl_local_proxy_tests() {
# Custom paths let you see everything.
start-proxy /custom
-check-curl-proxy-code /custom/ui 307
+check-curl-proxy-code /custom/api/kubernetes 404
+check-curl-proxy-code /custom/api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
@@ -3724,7 +3915,7 @@ run_cmd_with_img_tests() {
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
-kube::test::if_has_string "${output_message}" 'deployment "test1" created'
+kube::test::if_has_string "${output_message}" 'deployment.apps "test1" created'
kubectl delete deployments test1
# test invalid image name
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
@@ -4090,8 +4281,8 @@ run_resource_aliasing_tests() {
create_and_use_new_namespace
kube::log::status "Testing resource aliasing"
-kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
-kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}"
+kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml "${kube_flags[@]}"
+kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/service.yaml "${kube_flags[@]}"
object="all -l'app=cassandra'"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
@@ -4116,6 +4307,8 @@ run_kubectl_explain_tests() {
# shortcuts work
kubectl explain po
kubectl explain po.status.message
# cronjob works
kubectl explain cronjob
set +o nounset
set +o errexit
@@ -4300,6 +4493,51 @@ run_cluster_management_tests() {
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
# create test pods we can work with
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-1",
"labels": {
"e": "f"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "test-pod-2",
"labels": {
"c": "d"
}
},
"spec": {
"containers": [
{
"name": "container-1",
"resources": {},
"image": "test-image"
}
]
}
}
__EOF__
### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -4314,6 +4552,20 @@ run_cluster_management_tests() {
kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl drain with --pod-selector only evicts pods that match the given selector
# Pre-condition: node is schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
# Pre-condition: test-pod-1 and test-pod-2 exist
kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
# only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
# delete pod no longer in use
kubectl delete pod/test-pod-2
# Post-condition: node is schedulable
kubectl uncordon "127.0.0.1"
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
### kubectl uncordon update with --dry-run is a no-op
# Pre-condition: node is already schedulable
kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
@@ -4468,7 +4720,7 @@ run_impersonation_tests() {
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
foundError="False"
foundError=""
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
@@ -4521,6 +4773,7 @@ runTests() {
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
template_labels=".spec.template.metadata.labels.name"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
@@ -4639,6 +4892,7 @@ runTests() {
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_get_tests
record_command run_kubectl_server_print_tests
fi
@@ -4966,8 +5220,8 @@ runTests() {
kube::test::clear_all
if [ "$foundError" == "True" ]; then
echo "TEST FAILED"
if [[ -n "${foundError}" ]]; then
echo "FAILED TESTS: ""${foundError}"
exit 1
fi
}